From 135ef72ef4574816839544dee73fc08cc249e340 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Wed, 17 Aug 2016 14:03:02 -0400
Subject: [PATCH] add all.rs, extracted from futures-rs

---
 futures-rs-test-all/Cargo.lock                |  14 +
 futures-rs-test-all/Cargo.toml                |  21 +
 futures-rs-test-all/FAQ.md                    | 108 +++
 futures-rs-test-all/LICENSE-APACHE            | 201 +++++
 futures-rs-test-all/LICENSE-MIT               |  25 +
 futures-rs-test-all/README.md                 |   3 +
 futures-rs-test-all/README.md~                | 100 +++
 futures-rs-test-all/appveyor.yml              |  21 +
 futures-rs-test-all/makefile                  |   9 +
 futures-rs-test-all/src/and_then.rs           |  46 ++
 futures-rs-test-all/src/chain.rs              |  64 ++
 futures-rs-test-all/src/collect.rs            | 123 ++++
 futures-rs-test-all/src/done.rs               |  45 ++
 futures-rs-test-all/src/empty.rs              |  39 +
 futures-rs-test-all/src/executor.rs           | 153 ++++
 futures-rs-test-all/src/failed.rs             |  46 ++
 futures-rs-test-all/src/finished.rs           |  48 ++
 futures-rs-test-all/src/flatten.rs            |  44 ++
 futures-rs-test-all/src/forget.rs             |  32 +
 futures-rs-test-all/src/fuse.rs               |  36 +
 futures-rs-test-all/src/impls.rs              |  45 ++
 futures-rs-test-all/src/join.rs               | 140 ++++
 futures-rs-test-all/src/lazy.rs               |  86 +++
 futures-rs-test-all/src/lib.rs                | 696 ++++++++++++++++++
 futures-rs-test-all/src/lock.rs               | 104 +++
 futures-rs-test-all/src/map.rs                |  43 ++
 futures-rs-test-all/src/map_err.rs            |  43 ++
 futures-rs-test-all/src/or_else.rs            |  47 ++
 futures-rs-test-all/src/poll.rs               |  79 ++
 futures-rs-test-all/src/promise.rs            | 155 ++++
 futures-rs-test-all/src/select.rs             | 121 +++
 futures-rs-test-all/src/select_all.rs         | 113 +++
 futures-rs-test-all/src/slot.rs               | 671 +++++++++++++++++
 futures-rs-test-all/src/store.rs              |  37 +
 futures-rs-test-all/src/stream/and_then.rs    |  59 ++
 futures-rs-test-all/src/stream/buffered.rs    |  99 +++
 futures-rs-test-all/src/stream/channel.rs     | 188 +++++
 futures-rs-test-all/src/stream/collect.rs     |  51 ++
 futures-rs-test-all/src/stream/filter.rs      |  47 ++
 futures-rs-test-all/src/stream/filter_map.rs  |  48 ++
 futures-rs-test-all/src/stream/flatten.rs     |  58 ++
 futures-rs-test-all/src/stream/fold.rs        |  89 +++
 futures-rs-test-all/src/stream/for_each.rs    |  48 ++
 futures-rs-test-all/src/stream/fuse.rs        |  47 ++
 futures-rs-test-all/src/stream/future.rs      |  37 +
 futures-rs-test-all/src/stream/impls.rs       |  15 +
 futures-rs-test-all/src/stream/iter.rs        |  49 ++
 futures-rs-test-all/src/stream/map.rs         |  39 +
 futures-rs-test-all/src/stream/map_err.rs     |  39 +
 futures-rs-test-all/src/stream/merge.rs       |  77 ++
 futures-rs-test-all/src/stream/mod.rs         | 645 ++++++++++++++++
 futures-rs-test-all/src/stream/or_else.rs     |  58 ++
 futures-rs-test-all/src/stream/skip.rs        |  42 ++
 futures-rs-test-all/src/stream/skip_while.rs  |  85 +++
 futures-rs-test-all/src/stream/take.rs        |  48 ++
 futures-rs-test-all/src/stream/then.rs        |  59 ++
 .../src/stream/thread_state.rs                |  46 ++
 futures-rs-test-all/src/task.rs               | 316 ++++++++
 futures-rs-test-all/src/then.rs               |  44 ++
 futures-rs-test-all/src/util.rs               |  40 +
 futures-rs-test-all/tests/all.rs              | 352 +++++++++
 futures-rs-test-all/tests/support/mod.rs      |  60 ++
 62 files changed, 6343 insertions(+)
 create mode 100644 futures-rs-test-all/Cargo.lock
 create mode 100644 futures-rs-test-all/Cargo.toml
 create mode 100644 futures-rs-test-all/FAQ.md
 create mode 100644 futures-rs-test-all/LICENSE-APACHE
 create mode 100644 futures-rs-test-all/LICENSE-MIT
 create mode 100644 futures-rs-test-all/README.md
 create mode 100644 futures-rs-test-all/README.md~
 create mode 100644 futures-rs-test-all/appveyor.yml
 create mode 100644 futures-rs-test-all/makefile
 create mode 100644 futures-rs-test-all/src/and_then.rs
 create mode 100644 futures-rs-test-all/src/chain.rs
 create mode 100644 futures-rs-test-all/src/collect.rs
 create mode 100644 futures-rs-test-all/src/done.rs
 create mode 100644 futures-rs-test-all/src/empty.rs
 create mode 100644 futures-rs-test-all/src/executor.rs
 create mode 100644 futures-rs-test-all/src/failed.rs
 create mode 100644 futures-rs-test-all/src/finished.rs
 create mode 100644 futures-rs-test-all/src/flatten.rs
 create mode 100644 futures-rs-test-all/src/forget.rs
 create mode 100644 futures-rs-test-all/src/fuse.rs
 create mode 100644 futures-rs-test-all/src/impls.rs
 create mode 100644 futures-rs-test-all/src/join.rs
 create mode 100644 futures-rs-test-all/src/lazy.rs
 create mode 100644 futures-rs-test-all/src/lib.rs
 create mode 100644 futures-rs-test-all/src/lock.rs
 create mode 100644 futures-rs-test-all/src/map.rs
 create mode 100644 futures-rs-test-all/src/map_err.rs
 create mode 100644 futures-rs-test-all/src/or_else.rs
 create mode 100644 futures-rs-test-all/src/poll.rs
 create mode 100644 futures-rs-test-all/src/promise.rs
 create mode 100644 futures-rs-test-all/src/select.rs
 create mode 100644 futures-rs-test-all/src/select_all.rs
 create mode 100644 futures-rs-test-all/src/slot.rs
 create mode 100644 futures-rs-test-all/src/store.rs
 create mode 100644 futures-rs-test-all/src/stream/and_then.rs
 create mode 100644 futures-rs-test-all/src/stream/buffered.rs
 create mode 100644 futures-rs-test-all/src/stream/channel.rs
 create mode 100644 futures-rs-test-all/src/stream/collect.rs
 create mode 100644 futures-rs-test-all/src/stream/filter.rs
 create mode 100644 futures-rs-test-all/src/stream/filter_map.rs
 create mode 100644 futures-rs-test-all/src/stream/flatten.rs
 create mode 100644 futures-rs-test-all/src/stream/fold.rs
 create mode 100644 futures-rs-test-all/src/stream/for_each.rs
 create mode 100644 futures-rs-test-all/src/stream/fuse.rs
 create mode 100644 futures-rs-test-all/src/stream/future.rs
 create mode 100644 futures-rs-test-all/src/stream/impls.rs
 create mode 100644 futures-rs-test-all/src/stream/iter.rs
 create mode 100644 futures-rs-test-all/src/stream/map.rs
 create mode 100644 futures-rs-test-all/src/stream/map_err.rs
 create mode 100644 futures-rs-test-all/src/stream/merge.rs
 create mode 100755 futures-rs-test-all/src/stream/mod.rs
 create mode 100644 futures-rs-test-all/src/stream/or_else.rs
 create mode 100644 futures-rs-test-all/src/stream/skip.rs
 create mode 100644 futures-rs-test-all/src/stream/skip_while.rs
 create mode 100644 futures-rs-test-all/src/stream/take.rs
 create mode 100644 futures-rs-test-all/src/stream/then.rs
 create mode 100644 futures-rs-test-all/src/stream/thread_state.rs
 create mode 100644 futures-rs-test-all/src/task.rs
 create mode 100644 futures-rs-test-all/src/then.rs
 create mode 100644 futures-rs-test-all/src/util.rs
 create mode 100644 futures-rs-test-all/tests/all.rs
 create mode 100644 futures-rs-test-all/tests/support/mod.rs

diff --git a/futures-rs-test-all/Cargo.lock b/futures-rs-test-all/Cargo.lock
new file mode 100644
index 000000000..18ac15936
--- /dev/null
+++ b/futures-rs-test-all/Cargo.lock
@@ -0,0 +1,14 @@
+[root]
+name = "futures"
+version = "0.1.0"
+dependencies = [
+ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "log"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
diff --git a/futures-rs-test-all/Cargo.toml b/futures-rs-test-all/Cargo.toml
new file mode 100644
index 000000000..84e510307
--- /dev/null
+++ b/futures-rs-test-all/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "futures"
+version = "0.1.0"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+license = "MIT/Apache-2.0"
+readme = "README.md"
+keywords = ["futures", "async", "future"]
+repository = "https://github.com/alexcrichton/futures-rs"
+homepage = "https://github.com/alexcrichton/futures-rs"
+documentation = "http://alexcrichton.com/futures-rs/futures/"
+description = """
+An implementation of futures and streams featuring zero allocations,
+composability, and iterator-like interfaces.
+"""
+
+[dependencies]
+log = "0.3"
+
+[replace."openssl:0.7.14"]
+git = "https://github.com/sfackler/rust-openssl"
+branch = "breaks"
diff --git a/futures-rs-test-all/FAQ.md b/futures-rs-test-all/FAQ.md
new file mode 100644
index 000000000..5179aa456
--- /dev/null
+++ b/futures-rs-test-all/FAQ.md
@@ -0,0 +1,108 @@
+# FAQ
+
+A collection of some commonly asked questions, with responses! If you find any
+of these unsatisfactory, feel free to ping me (@alexcrichton) on github,
+acrichto on IRC, or just by email!
+
+### Why `Send + 'static`?
+
+The `Future` trait and all of its associated items require `Send + 'static`.
+This expresses the constraint that all futures must be sendable across threads
+as well as not contain any borrowed data. A common question though is why not
+just let this fall out of the types themselves? That is, I'll have `Send +
+'static` futures if they happen to contain `Send + 'static` data.
+
+On a technical level this is not currently possible. Due to the `tailcall`
+method which flattens a chain of futures, futures commonly store trait
+objects. As trait objects must decide on `Send` and `'static` up front, we
+opted to say "yes, futures will be both" early on.
+
+Doesn't this impose an extra cost though? Other libraries only require `'static`
+which allows one to use types like `Rc` and `RefCell` liberally. It is true
+that futures themselves cannot contain data like an `Rc`, but it is planned that
+through an *executor* you will be able to access non-`Send` data. This is not
+currently implemented, but is coming soon!
+
+The final reason is that almost all futures end up being `Send + 'static` in
+practice. This allows for a convenient implementation of driving futures by
+simply polling a future on whichever thread originates an event, ensuring a
+prompt resolution of a future if one is available. This, when combined with the
+technical difficulties and ergonomic concerns of *not* having `Send` and
+`'static`, led to the conclusion that the trait will require both.
+
+### Do futures work with multiple event loops?
+
+Yes! Futures are designed to source events from any location, including multiple
+event loops. All of the basic combinators will work on any number of event loops
+across any number of threads.
+
+### What if I have CPU intensive work?
+
+The documentation of the `Future::poll` function says that it's supposed to
+"return quickly", so what if I have work that doesn't return quickly? In this
+case it's intended that such work will run on a dedicated pool of threads meant
+for this sort of work, and a future for the returned value is used to represent
+its completion.
+
+A proof-of-concept method of doing this is the `futures-cpupool` crate in this
+repository, where you can execute work on a thread pool and receive a future
+for the value generated. This future is then composable with `and_then`, for
+example, to mesh with the rest of a future's computation.
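+
+As a minimal sketch of that composition pattern (the pool-produced future is
+simulated here with `finished`, since `futures-cpupool` is not part of this
+snapshot):
+
+```rust
+extern crate futures;
+
+use futures::*;
+
+fn main() {
+    // Pretend `heavy` came back from a thread pool; it composes like any future.
+    let heavy = finished::<u32, u32>(6 * 7);
+    let f = heavy.and_then(|n| finished::<String, u32>(format!("answer = {}", n)));
+    f.map(|s| assert_eq!(s, "answer = 42")).forget();
+}
+```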
+
+### How do I call `poll` and `schedule`?
+
+Right now, call `.forget()`. That method will drive a future to completion and
+drop all associated resources as soon as it's completed.
+
+Eventually more flavorful methods of configuring a `Task` will be available, but
+right now `.forget()` is all we have.
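+
+As a one-line sketch, using a leaf future from this crate:
+
+```rust
+extern crate futures;
+
+use futures::*;
+
+fn main() {
+    // Drive the future to completion in the background and drop its result.
+    finished::<u32, u32>(1).forget();
+}
+```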
+
+### How do I return a future?
+
+Returning a future is like returning an iterator in Rust today. It's not the
+easiest thing to do and you frequently need to resort to `Box` with a trait
+object. Thankfully though [`impl Trait`] is just around the corner and will
+allow returning these types unboxed in the future.
+
+[`impl Trait`]: https://github.com/rust-lang/rust/issues/34511
+
+For now though the cost of boxing shouldn't actually be that high. A future
+computation can be constructed *without boxing* and only the final step actually
+places a `Box` around the entire future. In that sense you're only paying for
+the allocation at the very end, not for any of the intermediate futures.
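+
+A minimal sketch of the boxing pattern (the `double` function is purely
+illustrative and not part of this crate):
+
+```rust
+extern crate futures;
+
+use futures::*;
+
+// The whole chain is built unboxed; only the final `boxed()` call allocates.
+fn double(x: u32) -> Box<Future<Item = u32, Error = u32>> {
+    finished::<u32, u32>(x).map(|x| x * 2).boxed()
+}
+
+fn main() {
+    double(3).map(|n| assert_eq!(n, 6)).forget();
+}
+```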
+
+### Does it work on Windows?
+
+Yes! This library builds on top of mio, which works on Windows.
+
+### What version of Rust should I use?
+
+While the library compiles on stable and beta (as of 2016-08-02), the nightly
+release (1.13.0-nightly) is recommended due to Cargo workspaces and compiler bug
+fixes that make compilation much speedier.
+
+### Is it on crates.io?
+
+Not yet! A few names are reserved, but they're not functional. I'd use the git
+repository here for now.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/futures-rs-test-all/LICENSE-APACHE b/futures-rs-test-all/LICENSE-APACHE
new file mode 100644
index 000000000..16fe87b06
--- /dev/null
+++ b/futures-rs-test-all/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/futures-rs-test-all/LICENSE-MIT b/futures-rs-test-all/LICENSE-MIT
new file mode 100644
index 000000000..28e630cf4
--- /dev/null
+++ b/futures-rs-test-all/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/futures-rs-test-all/README.md b/futures-rs-test-all/README.md
new file mode 100644
index 000000000..6621dbcc8
--- /dev/null
+++ b/futures-rs-test-all/README.md
@@ -0,0 +1,3 @@
+This is extracted from the futures-rs repo. It contains a snapshot of
+a problematic case that can cause a lot of strain on the
+monomorphization phase of the compiler.
diff --git a/futures-rs-test-all/README.md~ b/futures-rs-test-all/README.md~
new file mode 100644
index 000000000..3048701aa
--- /dev/null
+++ b/futures-rs-test-all/README.md~
@@ -0,0 +1,100 @@
+# futures-rs
+
+This is an **experimental** work in progress library for an implementation of
+futures in Rust.
+
+[![Build Status](https://travis-ci.org/alexcrichton/futures-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/futures-rs)
+[![Build status](https://ci.appveyor.com/api/projects/status/yl5w3ittk4kggfsh?svg=true)](https://ci.appveyor.com/project/alexcrichton/futures-rs)
+[![Coverage Status](https://coveralls.io/repos/github/alexcrichton/futures-rs/badge.svg?branch=master)](https://coveralls.io/github/alexcrichton/futures-rs?branch=master)
+
+[Documentation]: http://alexcrichton.com/futures-rs
+
+## Usage
+
+First, add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures = { git = "https://github.com/alexcrichton/futures-rs" }
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate futures;
+
+use futures::Future;
+```
+
+And then, use futures!
+
+## Current status
+
+This library is currently a work in progress, but it is rapidly progressing to
+the point of stabilization of the internal `Future` and `Stream` traits along
+with a number of surrounding pieces for useful functionality. Some crates are
+available on crates.io but are likely not functional, so the git repo should be
+used for now, but that will hopefully change soon!
+
+We're currently targeting a broader announcement and call to action (including
+improved docs, entry points for beginners, places to contribute, etc.) in early
+September, so stay tuned!
+
+## Why Futures?
+
+A major missing piece of the Rust ecosystem has been how to work with
+asynchronous I/O and, in general, how to compose many I/O tasks together in a
+lightweight way across libraries. Futures have worked out fantastically in other
+languages to solve this problem in frameworks like [finagle] and [wangle], and
+they also turn out to be a great way to solve this problem in Rust as well!
+
+[finagle]: https://twitter.github.io/finagle/
+[wangle]: https://github.com/facebook/wangle
+
+The purpose of the `futures` library in this repository is to provide the
+foundational layer to build an ecosystem of futures-generating computations so
+they can all compose with one another.
+
+## The `Future` trait
+
+At the heart of this crate is [the `Future` trait][Future], which, in true Rust
+style, is an interface for **zero allocation futures**. Like iterators in Rust,
+there is a wide variety of helpful combinators associated with this trait which
+are also zero allocation and serve as a succinct and powerful way to compose
+computations on futures.
+
+[Future]: http://alexcrichton.com/futures-rs/futures/trait.Future.html
+
+The `Future` trait is driven by two methods, [`poll`][poll] and
+[`schedule`][schedule], which allow pulling values out of a future and getting
+notified when a future is complete. More documentation can be found on
+the associated methods themselves.
+
+[poll]: http://alexcrichton.com/futures-rs/futures/trait.Future.html#tymethod.poll
+[schedule]: http://alexcrichton.com/futures-rs/futures/trait.Future.html#tymethod.schedule
+
+## I/O with futures
+
+With the power of zero-allocation futures we can take futures all the way down
+the stack to the I/O layer. The [`futures-io` crate][futures-io] provides an
+abstraction for I/O objects as a stream of readiness notifications plus `Read`
+and `Write` traits along with a number of combinators you'd expect when working
+with `std::io`.
+
+These abstractions can be implemented by the [`futures-mio` crate][futures-mio]
+to use [`mio`][mio] to power I/O. Finally, we can use these abstractions to
+build a [`futures-tls` crate][futures-tls] to provide TLS/SSL streams over
+arbitrary read/write streams.
+
+[futures-io]: http://alexcrichton.com/futures-rs/futures_io/index.html
+[futures-mio]: http://alexcrichton.com/futures-rs/futures_mio/index.html
+[futures-tls]: http://alexcrichton.com/futures-rs/futures_tls/index.html
+[mio]: https://github.com/carllerche/mio
+
+# License
+
+`futures-rs` is primarily distributed under the terms of both the MIT license and
+the Apache License (Version 2.0), with portions covered by various BSD-like
+licenses.
+
+See LICENSE-APACHE and LICENSE-MIT for details.
diff --git a/futures-rs-test-all/appveyor.yml b/futures-rs-test-all/appveyor.yml
new file mode 100644
index 000000000..e81fde239
--- /dev/null
+++ b/futures-rs-test-all/appveyor.yml
@@ -0,0 +1,21 @@
+environment:
+  matrix:
+  - TARGET: x86_64-pc-windows-msvc
+install:
+  - ps: Import-Certificate -FilePath futures-tls/tests/schannel-ca.crt -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose
+  - curl -sSf -o rustup-init.exe https://win.rustup.rs/
+  - rustup-init.exe -y --default-host %TARGET% --default-toolchain nightly
+  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
+  - rustc -V
+  - cargo -V
+
+build: false
+
+test_script:
+  - cargo test
+  - cargo test --manifest-path futures-io/Cargo.toml
+  - cargo test --manifest-path futures-iobuf/Cargo.toml
+  - cargo test --manifest-path futures-cpupool/Cargo.toml
+  - cargo test --manifest-path futures-mio/Cargo.toml
+  - cargo test --manifest-path futures-tls/Cargo.toml
+  - cargo test --manifest-path futures-minihttp/Cargo.toml
diff --git a/futures-rs-test-all/makefile b/futures-rs-test-all/makefile
new file mode 100644
index 000000000..87970257b
--- /dev/null
+++ b/futures-rs-test-all/makefile
@@ -0,0 +1,9 @@
+.PHONY: all touch clean
+
+all:
+	$(CARGO_BUILD) --test all
+touch:
+	rm -f target/debug/all-*
+clean:
+	rm target -rf
+	rm Cargo.lock
diff --git a/futures-rs-test-all/src/and_then.rs b/futures-rs-test-all/src/and_then.rs
new file mode 100644
index 000000000..115a4b61b
--- /dev/null
+++ b/futures-rs-test-all/src/and_then.rs
@@ -0,0 +1,46 @@
+use {Future, IntoFuture, Task, Poll};
+use chain::Chain;
+
+/// Future for the `and_then` combinator, chaining a computation onto the end of
+/// another future which completes successfully.
+///
+/// This is created by the `Future::and_then` method.
+pub struct AndThen<A, B, F> where A: Future, B: IntoFuture {
+    state: Chain<A, B::Future, F>,
+}
+
+pub fn new<A, B, F>(future: A, f: F) -> AndThen<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+          F: Send + 'static,
+{
+    AndThen {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for AndThen<A, B, F>
+    where A: Future,
+          B: IntoFuture<Error=A::Error>,
+          F: FnOnce(A::Item) -> B + Send + 'static,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<B::Item, B::Error> {
+        self.state.poll(task, |result, f| {
+            result.map(|e| {
+                Err(f(e).into_future())
+            })
+        })
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.state.schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        self.state.tailcall()
+    }
+}
diff --git a/futures-rs-test-all/src/chain.rs b/futures-rs-test-all/src/chain.rs
new file mode 100644
index 000000000..e0f1f6533
--- /dev/null
+++ b/futures-rs-test-all/src/chain.rs
@@ -0,0 +1,64 @@
+use std::mem;
+
+use util::Collapsed;
+use {Future, Task, Poll};
+
+pub enum Chain<A, B, C> where A: Future, B: Send + 'static {
+    First(Collapsed<A>, C),
+    Second(B),
+    Done,
+}
+
+impl<A, B, C> Chain<A, B, C>
+    where A: Future,
+          B: Future,
+          C: Send + 'static,
+{
+    pub fn new(a: A, c: C) -> Chain<A, B, C> {
+        Chain::First(Collapsed::Start(a), c)
+    }
+
+    pub fn poll<F>(&mut self, task: &mut Task, f: F) -> Poll<B::Item, B::Error>
+        where F: FnOnce(Result<A::Item, A::Error>, C)
+                        -> Result<Result<B::Item, B>, B::Error> + Send + 'static,
+    {
+        let a_result = match *self {
+            Chain::First(ref mut a, _) => try_poll!(a.poll(task)),
+            Chain::Second(ref mut b) => return b.poll(task),
+            Chain::Done => panic!("cannot poll a chained future twice"),
+        };
+        let data = match mem::replace(self, Chain::Done) {
+            Chain::First(_, c) => c,
+            _ => panic!(),
+        };
+        match f(a_result, data) {
+            Ok(Ok(e)) => Poll::Ok(e),
+            Ok(Err(mut b)) => {
+                let ret = b.poll(task);
+                *self = Chain::Second(b);
+                ret
+            }
+            Err(e) => Poll::Err(e),
+        }
+    }
+
+    pub fn schedule(&mut self, task: &mut Task) {
+        match *self {
+            Chain::First(ref mut a, _) => a.schedule(task),
+            Chain::Second(ref mut b) => b.schedule(task),
+            Chain::Done => task.notify(),
+        }
+    }
+
+    pub fn tailcall(&mut self)
+                    -> Option<Box<Future<Item=B::Item, Error=B::Error>>> {
+        match *self {
+            Chain::First(ref mut a, _) => {
+                a.collapse();
+                None
+            }
+            Chain::Second(ref mut b) => b.tailcall(),
+            Chain::Done => None,
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/collect.rs b/futures-rs-test-all/src/collect.rs
new file mode 100644
index 000000000..b95bd1aed
--- /dev/null
+++ b/futures-rs-test-all/src/collect.rs
@@ -0,0 +1,123 @@
+use std::mem;
+
+use {Future, IntoFuture, Task, Poll};
+use util::Collapsed;
+
+/// A future which takes a list of futures and resolves with a vector of the
+/// completed values.
+///
+/// This future is created with the `collect` method.
+pub struct Collect<I>
+    where I: IntoIterator + Send + 'static,
+          I::Item: IntoFuture,
+          I::IntoIter: Send + 'static,
+{
+    cur: Option<Collapsed<<I::Item as IntoFuture>::Future>>,
+    remaining: I::IntoIter,
+    result: Vec<<I::Item as IntoFuture>::Item>,
+}
+
+/// Creates a future which represents a collection of the results of the futures
+/// given.
+///
+/// The returned future will execute each underlying future one at a time,
+/// collecting the results into a destination `Vec<T>`. If any future returns
+/// an error then all other futures will be canceled and an error will be
+/// returned immediately. If all futures complete successfully, however, then
+/// the returned future will succeed with a `Vec` of all the successful results.
+///
+/// Note that this function does **not** attempt to execute each future in
+/// parallel; they are all executed in sequence.
+///
+/// # Examples
+///
+/// ```
+/// use futures::*;
+///
+/// let f = collect(vec![
+///     finished::<u32, u32>(1),
+///     finished::<u32, u32>(2),
+///     finished::<u32, u32>(3),
+/// ]);
+/// let f = f.map(|x| {
+///     assert_eq!(x, [1, 2, 3]);
+/// });
+///
+/// let f = collect(vec![
+///     finished::<u32, u32>(1).boxed(),
+///     failed::<u32, u32>(2).boxed(),
+///     finished::<u32, u32>(3).boxed(),
+/// ]);
+/// let f = f.then(|x| {
+///     assert_eq!(x, Err(2));
+///     x
+/// });
+/// ```
+pub fn collect<I>(i: I) -> Collect<I>
+    where I: IntoIterator + Send + 'static,
+          I::Item: IntoFuture,
+          I::IntoIter: Send + 'static,
+{
+    let mut i = i.into_iter();
+    Collect {
+        cur: i.next().map(IntoFuture::into_future).map(Collapsed::Start),
+        remaining: i,
+        result: Vec::new(),
+    }
+}
+
+impl<I> Future for Collect<I>
+    where I: IntoIterator + Send + 'static,
+          I::IntoIter: Send + 'static,
+          I::Item: IntoFuture,
+{
+    type Item = Vec<<I::Item as IntoFuture>::Item>;
+    type Error = <I::Item as IntoFuture>::Error;
+
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        loop {
+            match self.cur {
+                Some(ref mut cur) => {
+                    match try_poll!(cur.poll(task)) {
+                        Ok(e) => self.result.push(e),
+
+                        // If we hit an error, drop all our associated resources
+                        // ASAP.
+                        Err(e) => {
+                            for f in self.remaining.by_ref() {
+                                drop(f);
+                            }
+                            for f in self.result.drain(..) {
+                                drop(f);
+                            }
+                            return Poll::Err(e)
+                        }
+                    }
+                }
+                None => {
+                    return Poll::Ok(mem::replace(&mut self.result, Vec::new()))
+                }
+            }
+
+            self.cur = self.remaining.next()
+                           .map(IntoFuture::into_future)
+                           .map(Collapsed::Start);
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        if let Some(ref mut cur) = self.cur {
+            cur.schedule(task);
+        }
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        if let Some(ref mut cur) = self.cur {
+            cur.collapse();
+        }
+        None
+    }
+
+}
diff --git a/futures-rs-test-all/src/done.rs b/futures-rs-test-all/src/done.rs
new file mode 100644
index 000000000..d332b2ae5
--- /dev/null
+++ b/futures-rs-test-all/src/done.rs
@@ -0,0 +1,45 @@
+use {Future, Task, Poll};
+
+/// A future representing a value that is immediately ready.
+///
+/// Created by the `done` function.
+pub struct Done<T, E> {
+    inner: Option<Result<T, E>>,
+}
+
+/// Creates a new "leaf future" which will resolve with the given result.
+///
+/// The returned future represents a computation which is finished immediately.
+/// This can be useful with the `finished` and `failed` base future types to
+/// convert an immediate value to a future to interoperate elsewhere.
+///
+/// # Examples
+///
+/// ```
+/// use futures::*;
+///
+/// let future_of_1 = done::<u32, u32>(Ok(1));
+/// let future_of_err_2 = done::<u32, u32>(Err(2));
+/// ```
+pub fn done<T, E>(r: Result<T, E>) -> Done<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    Done { inner: Some(r) }
+}
+
+impl<T, E> Future for Done<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self, _task: &mut Task) -> Poll<T, E> {
+        self.inner.take().expect("cannot poll Done twice").into()
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        task.notify();
+    }
+}
diff --git a/futures-rs-test-all/src/empty.rs b/futures-rs-test-all/src/empty.rs
new file mode 100644
index 000000000..7fd348d71
--- /dev/null
+++ b/futures-rs-test-all/src/empty.rs
@@ -0,0 +1,39 @@
+use std::marker;
+
+use {Future, Task, Poll};
+
+/// A future which is never resolved.
+///
+/// This future can be created with the `empty` function.
+pub struct Empty<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    _data: marker::PhantomData<(T, E)>,
+}
+
+/// Creates a future which never resolves, representing a computation that never
+/// finishes.
+///
+/// The returned future will never resolve with a success but is still
+/// susceptible to cancellation. That is, if a callback is scheduled on the
+/// returned future, it is only run once the future is dropped (canceled).
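+///
+/// # Examples
+///
+/// A small sketch; the returned future can only ever be dropped, never
+/// resolved.
+///
+/// ```
+/// use futures::*;
+///
+/// let never = empty::<u32, u32>();
+/// drop(never);
+/// ```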
+pub fn empty<T: Send + 'static, E: Send + 'static>() -> Empty<T, E> {
+    Empty { _data: marker::PhantomData }
+}
+
+impl<T, E> Future for Empty<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self, _: &mut Task) -> Poll<T, E> {
+        Poll::NotReady
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        drop(task);
+    }
+}
diff --git a/futures-rs-test-all/src/executor.rs b/futures-rs-test-all/src/executor.rs
new file mode 100644
index 000000000..f2bfc7ab7
--- /dev/null
+++ b/futures-rs-test-all/src/executor.rs
@@ -0,0 +1,153 @@
+//! Work in progress implementation of executors for Futures.
+//!
+//! Note that this interface is very likely to change and not stay as-is, and it
+//! is not currently used much by futures beyond `DEFAULT`.
+
+use std::cell::{Cell, RefCell};
+use std::sync::Arc;
+
+/// Encapsulation of a value which has the ability to execute arbitrary code.
+///
+/// This trait is object safe and intended to be used through pointers like
+/// `Box` and `Arc`.
+pub trait Executor: Send + Sync + 'static {
+    /// Executes the given closure `f`, perhaps on a different thread or
+    /// deferred to a later time.
+    ///
+    /// This method may not execute `f` immediately, but it will arrange for the
+    /// callback to be invoked "in the near future".
+    fn execute<F>(&self, f: F)
+        where F: FnOnce() + Send + 'static,
+              Self: Sized
+    {
+        self.execute_boxed(Box::new(f))
+    }
+
+    /// Object-safe method of the above interface used when implementing trait
+    /// objects.
+    ///
+    /// This should not be called directly and instead `execute` should be used.
+    fn execute_boxed(&self, f: Box<ExecuteCallback>);
+}
+
+/// The default executor, used by futures by default currently.
+pub static DEFAULT: Limited = Limited;
+
+impl<T: Executor + ?Sized + Send + Sync + 'static> Executor for Box<T> {
+    fn execute_boxed(&self, f: Box<ExecuteCallback>) {
+        (**self).execute_boxed(f)
+    }
+}
+
+impl<T: Executor + ?Sized + Send + Sync + 'static> Executor for Arc<T> {
+    fn execute_boxed(&self, f: Box<ExecuteCallback>) {
+        (**self).execute_boxed(f)
+    }
+}
+
+/// Essentially `Box<FnOnce() + Send>`, just as a trait.
+pub trait ExecuteCallback: Send + 'static {
+    #[allow(missing_docs)]
+    fn call(self: Box<Self>);
+}
+
+impl<F: FnOnce() + Send + 'static> ExecuteCallback for F {
+    fn call(self: Box<F>) {
+        (*self)()
+    }
+}
+
+/// Implementation of an `Executor` which just executes everything immediately
+/// as soon as it's passed in.
+pub struct Inline;
+
+impl Executor for Inline {
+    fn execute<F: FnOnce() + Send + 'static>(&self, f: F) {
+        f()
+    }
+
+    fn execute_boxed(&self, f: Box<ExecuteCallback>) {
+        f.call()
+    }
+}
+
+/// Implementation of an executor which executes all callbacks immediately, but
+/// bounds the amount of recursion to prevent blowing the stack.
+pub struct Limited;
+
+thread_local!(static LIMITED: LimitState = LimitState::new());
+
+const LIMIT: usize = 100;
+
+struct LimitState {
+    count: Cell<usize>,
+    deferred: RefCell<Vec<Box<ExecuteCallback>>>,
+}
+
+impl Executor for Limited {
+    fn execute<F>(&self, f: F) where F: FnOnce() + Send + 'static {
+        LIMITED.with(|state| state.execute(f))
+    }
+    fn execute_boxed(&self, f: Box<ExecuteCallback>) {
+        self.execute(|| f.call());
+    }
+}
+
+impl LimitState {
+    fn new() -> LimitState {
+        LimitState {
+            count: Cell::new(0),
+            deferred: RefCell::new(Vec::new()),
+        }
+    }
+
+    fn execute<F>(&self, f: F) where F: FnOnce() + Send + 'static {
+        match self.count.get() {
+            0 => {
+                self.count.set(1);
+                f();
+                loop {
+                    let cb = self.deferred.borrow_mut().pop();
+                    match cb {
+                        Some(f) => f.call(),
+                        None => break,
+                    }
+                }
+                self.count.set(0);
+            }
+            n if n < LIMIT => {
+                self.count.set(n + 1);
+                f();
+                self.count.set(n);
+            }
+            _ => self.deferred.borrow_mut().push(Box::new(f)),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
+    use super::{Executor, Limited};
+
+    #[test]
+    fn limited() {
+        fn doit(ex: Arc<Executor>, hits: Arc<AtomicUsize>, i: usize) {
+            if i == 0 {
+                return
+            }
+            hits.fetch_add(1, Ordering::SeqCst);
+            let ex2 = ex.clone();
+            ex.execute(move || {
+                doit(ex2, hits, i - 1);
+            })
+        }
+
+        let n = 1_000_000;
+        let hits = Arc::new(AtomicUsize::new(0));
+        doit(Arc::new(Limited), hits.clone(), n);
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+    }
+}
diff --git a/futures-rs-test-all/src/failed.rs b/futures-rs-test-all/src/failed.rs
new file mode 100644
index 000000000..75211b6c4
--- /dev/null
+++ b/futures-rs-test-all/src/failed.rs
@@ -0,0 +1,46 @@
+use std::marker;
+
+use {Future, Task, Poll};
+
+/// A future representing a finished but erroneous computation.
+///
+/// Created by the `failed` function.
+pub struct Failed<T, E> {
+    _t: marker::PhantomData<T>,
+    e: Option<E>,
+}
+
+/// Creates a "leaf future" from an immediate value of a failed computation.
+///
+/// The returned future is similar to `done`, in that it will immediately run a
+/// scheduled callback with the provided value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::*;
+///
+/// let future_of_err_1 = failed::<u32, u32>(1);
+/// ```
+pub fn failed<T, E>(e: E) -> Failed<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    Failed { _t: marker::PhantomData, e: Some(e) }
+}
+
+impl<T, E> Future for Failed<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self, _: &mut Task) -> Poll<T, E> {
+        Poll::Err(self.e.take().expect("cannot poll Failed twice"))
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        task.notify();
+    }
+}
diff --git a/futures-rs-test-all/src/finished.rs b/futures-rs-test-all/src/finished.rs
new file mode 100644
index 000000000..948860e0f
--- /dev/null
+++ b/futures-rs-test-all/src/finished.rs
@@ -0,0 +1,48 @@
+use std::marker;
+
+use {Future, Task, Poll};
+
+/// A future representing a finished successful computation.
+///
+/// Created by the `finished` function.
+pub struct Finished<T, E> {
+    t: Option<T>,
+    _e: marker::PhantomData<E>,
+}
+
+/// Creates a "leaf future" from an immediate value of a finished and
+/// successful computation.
+///
+/// The returned future is similar to `done`, in that it will immediately run a
+/// scheduled callback with the provided value.
+///
+/// # Examples
+///
+/// ```
+/// use futures::*;
+///
+/// let future_of_1 = finished::<u32, u32>(1);
+/// ```
+pub fn finished<T, E>(t: T) -> Finished<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    Finished { t: Some(t), _e: marker::PhantomData }
+}
+
+impl<T, E> Future for Finished<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = T;
+    type Error = E;
+
+
+    fn poll(&mut self, _: &mut Task) -> Poll<T, E> {
+        Poll::Ok(self.t.take().expect("cannot poll Finished twice"))
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        task.notify();
+    }
+}
diff --git a/futures-rs-test-all/src/flatten.rs b/futures-rs-test-all/src/flatten.rs
new file mode 100644
index 000000000..0a790de28
--- /dev/null
+++ b/futures-rs-test-all/src/flatten.rs
@@ -0,0 +1,44 @@
+use {Future, IntoFuture, Task, Poll};
+use chain::Chain;
+
+/// Future for the `flatten` combinator, flattening a future-of-a-future to get just
+/// the result of the final future.
+///
+/// This is created by the `Future::flatten` method.
+pub struct Flatten<A> where A: Future, A::Item: IntoFuture {
+    state: Chain<A, <A::Item as IntoFuture>::Future, ()>,
+}
+
+pub fn new<A>(future: A) -> Flatten<A>
+    where A: Future,
+          A::Item: IntoFuture,
+{
+    Flatten {
+        state: Chain::new(future, ()),
+    }
+}
+
+impl<A> Future for Flatten<A>
+    where A: Future,
+          A::Item: IntoFuture,
+          <<A as Future>::Item as IntoFuture>::Error: From<<A as Future>::Error>
+{
+    type Item = <<A as Future>::Item as IntoFuture>::Item;
+    type Error = <<A as Future>::Item as IntoFuture>::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        self.state.poll(task, |a, ()| {
+            let future = try!(a).into_future();
+            Ok(Err(future))
+        })
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.state.schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        self.state.tailcall()
+    }
+}
diff --git a/futures-rs-test-all/src/forget.rs b/futures-rs-test-all/src/forget.rs
new file mode 100644
index 000000000..13c481074
--- /dev/null
+++ b/futures-rs-test-all/src/forget.rs
@@ -0,0 +1,32 @@
+use {Future, Poll, Task};
+
+pub fn forget<T: Future>(t: T) {
+    let thunk = ThunkFuture { inner: t.boxed() }.boxed();
+    Task::new().run(thunk)
+}
+
+// FIXME(rust-lang/rust#34416) should just be able to use map/map_err, but that
+//                             causes trans to go haywire.
+struct ThunkFuture<T, E> {
+    inner: Box<Future<Item=T, Error=E>>,
+}
+
+impl<T: Send + 'static, E: Send + 'static> Future for ThunkFuture<T, E> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self, task: &mut Task) -> Poll<(), ()> {
+        self.inner.poll(task).map(|_| ()).map_err(|_| ())
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.inner.schedule(task)
+    }
+
+    fn tailcall(&mut self) -> Option<Box<Future<Item=(), Error=()>>> {
+        if let Some(f) = self.inner.tailcall() {
+            self.inner = f;
+        }
+        None
+    }
+}
diff --git a/futures-rs-test-all/src/fuse.rs b/futures-rs-test-all/src/fuse.rs
new file mode 100644
index 000000000..ee4fbcf5a
--- /dev/null
+++ b/futures-rs-test-all/src/fuse.rs
@@ -0,0 +1,36 @@
+use {Future, Task, Poll};
+
+/// A future which "fuse"s a future once it's been resolved.
+///
+/// Normally futures can behave unpredictably once they're polled again after
+/// having been resolved, but `Fuse` is always defined to return `Poll::NotReady`
+/// from `poll` after it has succeeded, and after it has succeeded all future
+/// calls to `schedule` will be ignored.
+pub struct Fuse<A> {
+    future: Option<A>,
+}
+
+pub fn new<A: Future>(f: A) -> Fuse<A> {
+    Fuse {
+        future: Some(f),
+    }
+}
+
+impl<A: Future> Future for Fuse<A> {
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<A::Item, A::Error> {
+        let ret = self.future.as_mut().map(|f| f.poll(task));
+        if ret.as_ref().map(|r| r.is_ready()) == Some(true) {
+            self.future = None;
+        }
+        return ret.unwrap_or(Poll::NotReady)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        if let Some(ref mut f) = self.future {
+            f.schedule(task);
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/impls.rs b/futures-rs-test-all/src/impls.rs
new file mode 100644
index 000000000..8fbc39150
--- /dev/null
+++ b/futures-rs-test-all/src/impls.rs
@@ -0,0 +1,45 @@
+use std::mem;
+
+use {Future, empty, Poll, Task};
+
+impl<T, E> Future for Box<Future<Item=T, Error=E>>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        (**self).poll(task)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        (**self).schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        if let Some(f) = (**self).tailcall() {
+            return Some(f)
+        }
+        Some(mem::replace(self, Box::new(empty())))
+    }
+}
+
+impl<F: Future> Future for Box<F> {
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        (**self).poll(task)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        (**self).schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        (**self).tailcall()
+    }
+}
diff --git a/futures-rs-test-all/src/join.rs b/futures-rs-test-all/src/join.rs
new file mode 100644
index 000000000..893de7e6a
--- /dev/null
+++ b/futures-rs-test-all/src/join.rs
@@ -0,0 +1,140 @@
+#![allow(non_snake_case)]
+
+use std::mem;
+
+use {Future, Task, Poll};
+use util::Collapsed;
+
+macro_rules! generate {
+    ($(($Join:ident, $new:ident, <A, $($B:ident),*>),)*) => ($(
+        /// Future for the `join` combinator, waiting for two futures to
+        /// complete.
+        ///
+        /// This is created by the `Future::join` method.
+        pub struct $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            a: MaybeDone<A>,
+            $($B: MaybeDone<$B>,)*
+        }
+
+        pub fn $new<A, $($B),*>(a: A, $($B: $B),*) -> $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            let a = Collapsed::Start(a);
+            $(let $B = Collapsed::Start($B);)*
+            $Join {
+                a: MaybeDone::NotYet(a),
+                $($B: MaybeDone::NotYet($B)),*
+            }
+        }
+
+        impl<A, $($B),*> $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            fn erase(&mut self) {
+                self.a = MaybeDone::Gone;
+                $(self.$B = MaybeDone::Gone;)*
+            }
+        }
+
+        impl<A, $($B),*> Future for $Join<A, $($B),*>
+            where A: Future,
+                  $($B: Future<Error=A::Error>),*
+        {
+            type Item = (A::Item, $($B::Item),*);
+            type Error = A::Error;
+
+            fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+                let mut all_done = match self.a.poll(task) {
+                    Ok(done) => done,
+                    Err(e) => {
+                        self.erase();
+                        return Poll::Err(e)
+                    }
+                };
+                $(
+                    all_done = match self.$B.poll(task) {
+                        Ok(done) => all_done && done,
+                        Err(e) => {
+                            self.erase();
+                            return Poll::Err(e)
+                        }
+                    };
+                )*
+
+                if all_done {
+                    Poll::Ok((self.a.take(), $(self.$B.take()),*))
+                } else {
+                    Poll::NotReady
+                }
+            }
+
+            fn schedule(&mut self, task: &mut Task) {
+                if let MaybeDone::NotYet(ref mut a) = self.a {
+                    a.schedule(task);
+                }
+                $(
+                    if let MaybeDone::NotYet(ref mut a) = self.$B {
+                        a.schedule(task);
+                    }
+                )*
+            }
+
+            fn tailcall(&mut self)
+                        -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+                self.a.collapse();
+                $(self.$B.collapse();)*
+                None
+            }
+        }
+    )*)
+}
+
+generate! {
+    (Join, new, <A, B>),
+    (Join3, new3, <A, B, C>),
+    (Join4, new4, <A, B, C, D>),
+    (Join5, new5, <A, B, C, D, E>),
+}
+
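+/// Tracks the state of a single sub-future inside a `Join` combinator: still
+/// pending, completed with its value stashed for the final tuple, or already
+/// taken (or discarded after an error).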
+enum MaybeDone<A: Future> {
+    NotYet(Collapsed<A>),
+    Done(A::Item),
+    Gone,
+}
+
+impl<A: Future> MaybeDone<A> {
+    fn poll(&mut self, task: &mut Task) -> Result<bool, A::Error> {
+        let res = match *self {
+            MaybeDone::NotYet(ref mut a) => a.poll(task),
+            MaybeDone::Done(_) => return Ok(true),
+            MaybeDone::Gone => panic!("cannot poll Join twice"),
+        };
+        match res {
+            Poll::Ok(res) => {
+                *self = MaybeDone::Done(res);
+                Ok(true)
+            }
+            Poll::Err(res) => Err(res),
+            Poll::NotReady => Ok(false),
+        }
+    }
+
+    fn take(&mut self) -> A::Item {
+        match mem::replace(self, MaybeDone::Gone) {
+            MaybeDone::Done(a) => a,
+            _ => panic!(),
+        }
+    }
+
+    fn collapse(&mut self) {
+        match *self {
+            MaybeDone::NotYet(ref mut a) => a.collapse(),
+            _ => {}
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/lazy.rs b/futures-rs-test-all/src/lazy.rs
new file mode 100644
index 000000000..a41d7fca6
--- /dev/null
+++ b/futures-rs-test-all/src/lazy.rs
@@ -0,0 +1,86 @@
+use std::mem;
+
+use {Future, IntoFuture, Task, Poll};
+
+/// A future which defers creation of the actual future until a callback is
+/// scheduled.
+///
+/// This is created by the `lazy` function.
+pub struct Lazy<F, R> {
+    inner: _Lazy<F, R>,
+}
+
+enum _Lazy<F, R> {
+    First(F),
+    Second(R),
+    Moved,
+}
+
+/// Creates a new future which will eventually be the same as the one created
+/// by the closure provided.
+///
+/// The provided closure is only run once the future has a callback scheduled
+/// on it; otherwise the closure never runs. Once run, however, this future is
+/// the same as the one the closure creates.
+///
+/// # Examples
+///
+/// ```
+/// use futures::*;
+///
+/// let a = lazy(|| finished::<u32, u32>(1));
+///
+/// let b = lazy(|| -> Done<u32, u32> {
+///     panic!("oh no!")
+/// });
+/// drop(b); // closure is never run
+/// ```
+pub fn lazy<F, R>(f: F) -> Lazy<F, R::Future>
+    where F: FnOnce() -> R + Send + 'static,
+          R: IntoFuture
+{
+    Lazy {
+        inner: _Lazy::First(f),
+    }
+}
+
+impl<F, R> Lazy<F, R::Future>
+    where F: FnOnce() -> R + Send + 'static,
+          R: IntoFuture,
+{
+    fn get(&mut self) -> &mut R::Future {
+        match self.inner {
+            _Lazy::First(_) => {}
+            _Lazy::Second(ref mut f) => return f,
+            _Lazy::Moved => panic!(), // can only happen if `f()` panics
+        }
+        match mem::replace(&mut self.inner, _Lazy::Moved) {
+            _Lazy::First(f) => self.inner = _Lazy::Second(f().into_future()),
+            _ => panic!(), // we already found First
+        }
+        match self.inner {
+            _Lazy::Second(ref mut f) => f,
+            _ => panic!(), // we just stored Second
+        }
+    }
+}
+
+impl<F, R> Future for Lazy<F, R::Future>
+    where F: FnOnce() -> R + Send + 'static,
+          R: IntoFuture,
+{
+    type Item = R::Item;
+    type Error = R::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<R::Item, R::Error> {
+        self.get().poll(task)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.get().schedule(task)
+    }
+
+    fn tailcall(&mut self) -> Option<Box<Future<Item=R::Item, Error=R::Error>>> {
+        self.get().tailcall()
+    }
+}
diff --git a/futures-rs-test-all/src/lib.rs b/futures-rs-test-all/src/lib.rs
new file mode 100644
index 000000000..8bcbcf7bf
--- /dev/null
+++ b/futures-rs-test-all/src/lib.rs
@@ -0,0 +1,696 @@
+//! A work-in-progress futures library for Rust.
+//!
+//! This library is an **experimental** implementation of Futures in Rust, and
+//! is very likely to change over time and break compatibility without notice.
+//! Be warned!
+//!
+//! The documentation of this library is also very much a work in progress, but
+//! if anything is unclear please open an issue and hopefully it'll be
+//! documented quickly!
+//!
+//! For more information, see the [README for now][readme]
+//!
+//! [readme]: https://github.com/alexcrichton/futures-rs#futures-rs
+
+#![deny(missing_docs)]
+
+#[macro_use]
+extern crate log;
+
+mod lock;
+mod slot;
+mod util;
+
+#[macro_use]
+mod poll;
+pub use poll::Poll;
+
+mod task;
+pub use task::{Task, TaskData, TaskHandle};
+
+pub mod executor;
+
+// Primitive futures
+mod collect;
+mod done;
+mod empty;
+mod failed;
+mod finished;
+mod lazy;
+mod promise;
+mod store;
+pub use collect::{collect, Collect};
+pub use done::{done, Done};
+pub use empty::{empty, Empty};
+pub use failed::{failed, Failed};
+pub use finished::{finished, Finished};
+pub use lazy::{lazy, Lazy};
+pub use promise::{promise, Promise, Complete, Canceled};
+pub use store::{store, Store};
+
+// combinators
+mod and_then;
+mod flatten;
+mod fuse;
+mod join;
+mod map;
+mod map_err;
+mod or_else;
+mod select;
+mod select_all;
+mod then;
+pub use and_then::AndThen;
+pub use flatten::Flatten;
+pub use fuse::Fuse;
+pub use join::{Join, Join3, Join4, Join5};
+pub use map::Map;
+pub use map_err::MapErr;
+pub use or_else::OrElse;
+pub use select::{Select, SelectNext};
+pub use select_all::{SelectAll, SelectAllNext, select_all};
+pub use then::Then;
+
+// streams
+pub mod stream;
+
+// impl details
+mod chain;
+mod impls;
+mod forget;
+
+macro_rules! join {
+    ($(($join:ident, $Join:ident, $new:ident, <$($name:ident: $B:ident),*>),)*) => ($(
+        /// Same as `join`, but with more futures.
+        fn $join<$($B),*>(self, $($name: $B),*) -> $Join<Self, $($B::Future),*>
+            where $($B: IntoFuture<Error=Self::Error>,)*
+                  Self: Sized,
+        {
+            join::$new(self, $($name.into_future()),*)
+        }
+    )*)
+}
+
+/// Trait for types which represent a placeholder of a value that will become
+/// available at some later point in time.
+///
+/// Futures are used to provide a sentinel through which a value can be
+/// referenced. They crucially allow chaining operations through consumption
+/// which allows expressing entire trees of computation as one sentinel value.
+///
+/// The ergonomics and implementation of the `Future` trait are very similar to
+/// the `Iterator` trait in Rust: there is a small handful of methods to
+/// implement and a load of default methods that consume a `Future`, producing
+/// a new value.
+///
+/// # Core methods
+///
+/// The core methods of futures, currently `poll`, `schedule`, and `tailcall`,
+/// are not intended to be called in general. These are used to drive an entire
+/// task of many futures composed together only from the top level.
+///
+/// More documentation can be found on each method about what its purpose is,
+/// but in general all of the combinators are the main methods that should be
+/// used.
+///
+/// # Combinators
+///
+/// Like iterators, futures provide a large number of combinators to work with
+/// futures to express computations in a much more natural way than scheduling
+/// a number of callbacks. For example the `map` method can change a
+/// `Future<Item=T>` into a `Future<Item=U>`, and the `and_then` combinator can
+/// create a second future once the first is done, resolving only when the
+/// second one finishes.
+///
+/// Combinators act very similarly to the methods on the `Iterator` trait itself
+/// or those on `Option` and `Result`. Like with iterators, the combinators are
+/// zero-cost and don't impose any extra layers of indirection you wouldn't
+/// otherwise have to write down.
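+///
+/// For instance, a small illustrative sketch of chaining combinators:
+///
+/// ```
+/// use futures::*;
+///
+/// let future = finished::<u32, u32>(1)
+///     .map(|x| x + 1)
+///     .and_then(|x| {
+///         if x == 2 {Ok(x)} else {Err(0)}
+///     });
+/// drop(future);
+/// ```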
+// TODO: expand this
+pub trait Future: Send + 'static {
+
+    /// The type of value that this future will be resolved with if it
+    /// completes successfully.
+    type Item: Send + 'static;
+
+    /// The type of error that this future will resolve with if it fails in a
+    /// normal fashion.
+    ///
+    /// Futures may also fail due to panics or cancellation, but that is
+    /// expressed through the `PollError` type, not this type.
+    type Error: Send + 'static;
+
+    /// Query this future to see if its value has become available.
+    ///
+    /// This function will check the internal state of the future and assess
+    /// whether the value is ready to be produced. Implementors of this function
+    /// should ensure that a call to this **never blocks** as event loops may
+    /// not work properly otherwise.
+    ///
+    /// Callers of this function must provide the "task" in which the future is
+    /// running through the `task` argument. This task contains information like
+    /// task-local variables which the future may have stored references to
+    /// internally.
+    ///
+    /// # Runtime characteristics
+    ///
+    /// This function, `poll`, is the primary method for 'making progress'
+    /// within a tree of futures. For example this method will be called
+    /// repeatedly as the internal state machine makes its various transitions.
+    /// Additionally, there are few guarantees about *where* this function is
+    /// run (e.g. it may or may not be on an I/O thread). Unless it is
+    /// otherwise arranged, **implementations of this function should finish
+    /// very quickly**.
+    ///
+    /// Returning quickly prevents unnecessarily clogging up threads and event
+    /// loops with a `poll` call that, for example, performs some expensive
+    /// computation. If a call to `poll` is known ahead of time to take a
+    /// while, the work should be offloaded to a thread pool (or something
+    /// similar) to ensure that `poll` can return quickly.
+    ///
+    /// # Return value
+    ///
+    /// This function returns `Poll::NotReady` if the future is not ready yet,
+    /// or `Poll::{Ok,Err}` with the result of this future if it's ready. Once
+    /// a future has returned `Poll::Ok` or `Poll::Err` it is considered a
+    /// contract error to continue polling it.
+    ///
+    /// # Panics
+    ///
+    /// Once a future has completed (returned `Poll::{Ok, Err}` from `poll`),
+    /// then any future calls to `poll` may panic, block forever, or otherwise
+    /// cause wrong behavior. The `Future` trait itself provides no guarantees
+    /// about the behavior of `poll` after a future has completed at least
+    /// once.
+    ///
+    /// Callers who may call `poll` too many times may want to consider using
+    /// the `fuse` adaptor which defines the behavior of `poll`, but comes with
+    /// a little bit of extra cost.
+    ///
+    /// # Errors
+    ///
+    /// This future may have failed to finish the computation, in which case
+    /// the `Poll::Err` variant will be returned with an appropriate payload of
+    /// an error.
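+    ///
+    /// # Examples
+    ///
+    /// A small illustrative sketch: polling an already-finished future
+    /// immediately yields its value.
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let mut task = Task::new();
+    /// let mut future = finished::<i32, u32>(2);
+    /// match future.poll(&mut task) {
+    ///     Poll::Ok(v) => assert_eq!(v, 2),
+    ///     Poll::Err(_) => panic!("finished futures do not fail"),
+    ///     Poll::NotReady => panic!("a finished future is immediately ready"),
+    /// }
+    /// ```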
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error>;
+
+    /// Schedule a task to be notified when this future is ready.
+    ///
+    /// Throughout the lifetime of a future it may frequently be `poll`'d on to
+    /// test whether the value is ready yet. If `Poll::NotReady` is returned,
+    /// however, the caller may then register interest via this function to get
+    /// a notification when the future can indeed make progress.
+    ///
+    /// The `task` argument provided is the same task as provided to `poll`,
+    /// and it's the overall task which is driving this future. The task will
+    /// be notified through the `TaskHandle` type generated from the `handle`
+    /// method, and spurious notifications are allowed. That is, it's ok for a
+    /// notification to be received after which the future, when polled, still
+    /// isn't complete.
+    ///
+    /// Implementors of the `Future` trait are recommended to just blindly pass
+    /// around this task rather than attempt to manufacture new tasks.
+    ///
+    /// When the `task` is notified it will be provided a set of tokens that
+    /// represent the set of events which have happened since it was last called
+    /// (or the last call to `poll`). These events can then be used by the task
+    /// to later inform `poll` calls to not poll too much.
+    ///
+    /// # Multiple calls to `schedule`
+    ///
+    /// This function cannot be used to queue up multiple tasks to be notified
+    /// when a future is ready to make progress. Only the most recent call to
+    /// `schedule` is guaranteed to have notifications received when `schedule`
+    /// is called multiple times.
+    ///
+    /// If this function is called twice, it may be the case that the previous
+    /// task is never notified. It is recommended that this function is called
+    /// with the same task for the entire lifetime of this future.
+    ///
+    /// # Panics
+    ///
+    /// Once a future has completed (returned `Poll::Ok` or `Poll::Err` from
+    /// `poll`), then further calls to either `poll` or this function,
+    /// `schedule`, should not be expected to behave well. A call to `schedule`
+    /// after a poll has succeeded may panic, block forever, or otherwise
+    /// exhibit odd behavior.
+    ///
+    /// Callers who may call `schedule` after a future is finished may want to
+    /// consider using the `fuse` adaptor which defines the behavior of
+    /// `schedule` after a successful poll, but comes with a little bit of
+    /// extra cost.
+    fn schedule(&mut self, task: &mut Task);
+
+    /// Perform tail-call optimization on this future.
+    ///
+    /// A particular future may actually represent a large tree of computation,
+    /// the structure of which can be optimized periodically after some of the
+    /// work has completed. This function is intended to be called after an
+    /// unsuccessful `poll` to ensure that the computation graph of a future
+    /// remains at a reasonable size.
+    ///
+    /// This function is intended to be idempotent. If `None` is returned then
+    /// the internal structure may have been optimized, but this future itself
+    /// must stick around to represent the computation at hand.
+    ///
+    /// If `Some` is returned then the returned future will be realized with the
+    /// same value that this future *would* have been had this method not been
+    /// called. Essentially, if `Some` is returned, then this future can be
+    /// forgotten and instead the returned value is used.
+    ///
+    /// Note that this is a default method which returns `None`, but any future
+    /// adaptor should implement it to flatten the underlying future, if any.
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        None
+    }
+
+    /// Convenience function for turning this future into a trait object.
+    ///
+    /// This simply avoids the need to write `Box::new` and can often help with
+    /// type inference as well by always returning a trait object.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let a: Box<Future<Item=i32, Error=i32>> = done(Ok(1)).boxed();
+    /// ```
+    fn boxed(self) -> Box<Future<Item=Self::Item, Error=Self::Error>>
+        where Self: Sized
+    {
+        Box::new(self)
+    }
+
+    /// Map this future's result to a different type, returning a new future of
+    /// the resulting type.
+    ///
+    /// This function is similar to the `Option::map` or `Iterator::map` where
+    /// it will change the type of the underlying future. This is useful to
+    /// chain along a computation once a future has been resolved.
+    ///
+    /// The closure provided will only be called if this future is resolved
+    /// successfully. If this future returns an error, panics, or is canceled,
+    /// then the closure provided will never be invoked.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it, similar to the existing `map` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let future_of_1 = finished::<u32, u32>(1);
+    /// let future_of_4 = future_of_1.map(|x| x + 3);
+    /// ```
+    fn map<F, U>(self, f: F) -> Map<Self, F>
+        where F: FnOnce(Self::Item) -> U + Send + 'static,
+              U: Send + 'static,
+              Self: Sized,
+    {
+        assert_future::<U, Self::Error, _>(map::new(self, f))
+    }
+
+    /// Map this future's error to a different error, returning a new future.
+    ///
+    /// This function is similar to the `Result::map_err` where it will change
+    /// the error type of the underlying future. This is useful for example to
+    /// ensure that futures have the same error type when used with combinators
+    /// like `select` and `join`.
+    ///
+    /// The closure provided will only be called if this future is resolved
+    /// with an error. If this future returns a success, panics, or is
+    /// canceled, then the closure provided will never be invoked.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let future_of_err_1 = failed::<u32, u32>(1);
+    /// let future_of_err_4 = future_of_err_1.map_err(|x| x + 3);
+    /// ```
+    fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
+        where F: FnOnce(Self::Error) -> E + Send + 'static,
+              E: Send + 'static,
+              Self: Sized,
+    {
+        assert_future::<Self::Item, E, _>(map_err::new(self, f))
+    }
+
+    /// Chain on a computation for when a future has finished, passing the
+    /// result of the future to the provided closure `f`.
+    ///
+    /// This function can be used to ensure a computation runs regardless of
+    /// the conclusion of the future. The closure provided will be yielded a
+    /// `Result` once the future is complete.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed future
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// If this future is canceled or panics then the closure `f` will not be
+    /// run.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let future_of_1 = finished::<u32, u32>(1);
+    /// let future_of_4 = future_of_1.then(|x| {
+    ///     x.map(|y| y + 3)
+    /// });
+    ///
+    /// let future_of_err_1 = failed::<u32, u32>(1);
+    /// let future_of_4 = future_of_err_1.then(|x| {
+    ///     match x {
+    ///         Ok(_) => panic!("expected an error"),
+    ///         Err(y) => finished::<u32, u32>(y + 3),
+    ///     }
+    /// });
+    /// ```
+    fn then<F, B>(self, f: F) -> Then<Self, B, F>
+        where F: FnOnce(Result<Self::Item, Self::Error>) -> B + Send + 'static,
+              B: IntoFuture,
+              Self: Sized,
+    {
+        assert_future::<B::Item, B::Error, _>(then::new(self, f))
+    }
+
+    /// Execute another future after this one has resolved successfully.
+    ///
+    /// This function can be used to chain two futures together and ensure that
+    /// the final future isn't resolved until both have finished. The closure
+    /// provided is yielded the successful result of this future and returns
+    /// another value which can be converted into a future.
+    ///
+    /// Note that because `Result` implements the `IntoFuture` trait this method
+    /// can also be useful for chaining fallible and serial computations onto
+    /// the end of one future.
+    ///
+    /// If this future is canceled, panics, or completes with an error then the
+    /// provided closure `f` is never called.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let future_of_1 = finished::<u32, u32>(1);
+    /// let future_of_4 = future_of_1.and_then(|x| {
+    ///     Ok(x + 3)
+    /// });
+    ///
+    /// let future_of_err_1 = failed::<u32, u32>(1);
+    /// future_of_err_1.and_then(|_| -> Done<u32, u32> {
+    ///     panic!("should not be called in case of an error");
+    /// });
+    /// ```
+    fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F>
+        where F: FnOnce(Self::Item) -> B + Send + 'static,
+              B: IntoFuture<Error = Self::Error>,
+              Self: Sized,
+    {
+        assert_future::<B::Item, Self::Error, _>(and_then::new(self, f))
+    }
+
+    /// Execute another future after this one has resolved with an error.
+    ///
+    /// This function can be used to chain two futures together and ensure that
+    /// the final future isn't resolved until both have finished. The closure
+    /// provided is yielded the error of this future and returns another value
+    /// which can be converted into a future.
+    ///
+    /// Note that because `Result` implements the `IntoFuture` trait this method
+    /// can also be useful for chaining fallible and serial computations onto
+    /// the end of one future.
+    ///
+    /// If this future is canceled, panics, or completes successfully then the
+    /// provided closure `f` is never called.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let future_of_err_1 = failed::<u32, u32>(1);
+    /// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
+    ///     Ok(x + 3)
+    /// });
+    ///
+    /// let future_of_1 = finished::<u32, u32>(1);
+    /// future_of_1.or_else(|_| -> Done<u32, u32> {
+    ///     panic!("should not be called in case of success");
+    /// });
+    /// ```
+    fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F>
+        where F: FnOnce(Self::Error) -> B + Send + 'static,
+              B: IntoFuture<Item = Self::Item>,
+              Self: Sized,
+    {
+        assert_future::<Self::Item, B::Error, _>(or_else::new(self, f))
+    }
+
+    /// Waits for either one of two futures to complete.
+    ///
+    /// This function will return a new future which awaits for either this or
+    /// the `other` future to complete. The returned future will finish with
+    /// both the value resolved and a future representing the completion of the
+    /// other work. Both futures must have the same item and error type.
+    ///
+    /// If either future is canceled or panics, the other is canceled and the
+    /// original error is propagated upwards.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// // A poor-man's join implemented on top of select
+    ///
+    /// fn join<A>(a: A, b: A)
+    ///            -> Box<Future<Item=(A::Item, A::Item), Error=A::Error>>
+    ///     where A: Future,
+    /// {
+    ///     a.select(b).then(|res| {
+    ///         match res {
+    ///             Ok((a, b)) => b.map(|b| (a, b)).boxed(),
+    ///             Err((a, _)) => failed(a).boxed(),
+    ///         }
+    ///     }).boxed()
+    /// }
+    /// ```
+    fn select<B>(self, other: B) -> Select<Self, B::Future>
+        where B: IntoFuture<Item=Self::Item, Error=Self::Error>,
+              Self: Sized,
+    {
+        let f = select::new(self, other.into_future());
+        assert_future::<(Self::Item, SelectNext<Self, B::Future>),
+                        (Self::Error, SelectNext<Self, B::Future>), _>(f)
+    }
+
+    /// Joins the result of two futures, waiting for them both to complete.
+    ///
+    /// This function will return a new future which awaits both this and the
+    /// `other` future to complete. The returned future will finish with a tuple
+    /// of both results.
+    ///
+    /// Both futures must have the same error type, and if either finishes with
+    /// an error then the other will be canceled and that error will be
+    /// returned.
+    ///
+    /// If either future is canceled or panics, the other is canceled and the
+    /// original error is propagated upwards.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let a = finished::<u32, u32>(1);
+    /// let b = finished::<u32, u32>(2);
+    /// let pair = a.join(b);
+    ///
+    /// pair.map(|(a, b)| {
+    ///     assert_eq!(a, 1);
+    ///     assert_eq!(b, 2);
+    /// });
+    /// ```
+    fn join<B>(self, other: B) -> Join<Self, B::Future>
+        where B: IntoFuture<Error=Self::Error>,
+              Self: Sized,
+    {
+        let f = join::new(self, other.into_future());
+        assert_future::<(Self::Item, B::Item), Self::Error, _>(f)
+    }
+
+    join! {
+        (join3, Join3, new3, <b: B, c: C>),
+        (join4, Join4, new4, <b: B, c: C, d: D>),
+        (join5, Join5, new5, <b: B, c: C, d: D, e: E>),
+    }
+
+    /// Flatten the execution of this future when the successful result of this
+    /// future is itself another future.
+    ///
+    /// This can be useful when combining futures together to flatten the
+    /// computation out to the final result.
+    /// when the successful result of this future itself implements the
+    /// `IntoFuture` trait and the error can be created from this future's error
+    /// type.
+    ///
+    /// This method is equivalent to `self.then(|x| x)`.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// let future_of_a_future = finished::<_, u32>(finished::<u32, u32>(1));
+    /// let future_of_1 = future_of_a_future.flatten();
+    /// ```
+    fn flatten(self) -> Flatten<Self>
+        where Self::Item: IntoFuture,
+              <<Self as Future>::Item as IntoFuture>::Error:
+                    From<<Self as Future>::Error>,
+              Self: Sized
+    {
+        let f = flatten::new(self);
+        assert_future::<<<Self as Future>::Item as IntoFuture>::Item,
+                        <<Self as Future>::Item as IntoFuture>::Error,
+                        _>(f)
+    }
+
+    /// Fuse a future such that `poll` will never again be called once it has
+    /// returned a success.
+    ///
+    /// Currently once a future has returned `Poll::Ok` or `Poll::Err` from
+    /// `poll`, any further calls could exhibit bad behavior such as blocking
+    /// forever, panicking, or never returning. If it is known that `poll` may
+    /// be called too often then this method can be used to ensure that it has
+    /// defined semantics.
+    ///
+    /// Once a future has been `fuse`d and it returns success from `poll`, then
+    /// it will forever return `Poll::NotReady` from `poll` (it will never
+    /// resolve again). This, unlike the trait's `poll` method, is guaranteed.
+    ///
+    /// Additionally, once a future has completed, this `Fuse` combinator will
+    /// ensure that no further callbacks are registered with the underlying
+    /// future.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// use futures::*;
+    ///
+    /// let mut task = Task::new();
+    /// let mut future = finished::<i32, u32>(2);
+    /// assert!(future.poll(&mut task).is_ready());
+    ///
+    /// // Normally, a call such as this would panic:
+    /// //future.poll(&mut task);
+    ///
+    /// // This, however, is guaranteed to not panic
+    /// let mut future = finished::<i32, u32>(2).fuse();
+    /// assert!(future.poll(&mut task).is_ready());
+    /// assert!(future.poll(&mut task).is_not_ready());
+    /// ```
+    fn fuse(self) -> Fuse<Self>
+        where Self: Sized
+    {
+        let f = fuse::new(self);
+        assert_future::<Self::Item, Self::Error, _>(f)
+    }
+
+    /// Consume this future and allow it to execute without cancelling it.
+    ///
+    /// Normally whenever a future is dropped it signals that the underlying
+    /// computation should be cancelled ASAP. This function, however, will
+    /// consume the future and arrange for the future itself to get dropped only
+    /// when the computation has completed.
+    ///
+    /// This function can be useful to ensure that futures with side effects can
+    /// run "in the background", but it is discouraged as it doesn't allow any
+    /// control over the future in terms of cancellation.
+    ///
+    /// Generally applications should retain handles on futures to ensure
+    /// they're properly cleaned up if something unexpected happens.
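+    ///
+    /// # Examples
+    ///
+    /// A brief sketch of firing off a future purely for its side effect:
+    ///
+    /// ```
+    /// use futures::*;
+    ///
+    /// finished::<u32, u32>(1).map(|x| {
+    ///     println!("got: {}", x);
+    /// }).forget();
+    /// ```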
+    fn forget(self) where Self: Sized {
+        forget::forget(self);
+    }
+}
+
+// Just a helper function to ensure the futures we're returning all have the
+// right implementations.
+fn assert_future<A, B, F>(t: F) -> F
+    where F: Future<Item=A, Error=B>,
+          A: Send + 'static,
+          B: Send + 'static,
+{
+    t
+}
+
+/// Class of types which can be converted themselves into a future.
+///
+/// This trait is very similar to the `IntoIterator` trait and is intended to be
+/// used in a very similar fashion.
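+///
+/// Because `Result<T, E>` implements `IntoFuture`, a plain `Result` can be
+/// returned directly from combinator closures. For example (an illustrative
+/// sketch):
+///
+/// ```
+/// use futures::*;
+///
+/// let future = finished::<u32, u32>(1).and_then(|x| Ok(x + 1));
+/// drop(future);
+/// ```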
+pub trait IntoFuture: Send + 'static {
+    /// The future that this type can be converted into.
+    type Future: Future<Item=Self::Item, Error=Self::Error>;
+
+    /// The item that the future may resolve with.
+    type Item: Send + 'static;
+    /// The error that the future may resolve with.
+    type Error: Send + 'static;
+
+    /// Consumes this object and produces a future.
+    fn into_future(self) -> Self::Future;
+}
+
+impl<F: Future> IntoFuture for F {
+    type Future = F;
+    type Item = F::Item;
+    type Error = F::Error;
+
+    fn into_future(self) -> F {
+        self
+    }
+}
+
+impl<T, E> IntoFuture for Result<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Future = Done<T, E>;
+    type Item = T;
+    type Error = E;
+
+    fn into_future(self) -> Done<T, E> {
+        done(self)
+    }
+}
diff --git a/futures-rs-test-all/src/lock.rs b/futures-rs-test-all/src/lock.rs
new file mode 100644
index 000000000..c2e33a53c
--- /dev/null
+++ b/futures-rs-test-all/src/lock.rs
@@ -0,0 +1,104 @@
+//! A "mutex" which only supports try_lock
+//!
+//! In a futures library the eventual call to `epoll` should be the only thing
+//! that ever blocks, so this module provides a fast user-space implementation
+//! of a lock that only supports a `try_lock` operation.
+
+use std::cell::UnsafeCell;
+use std::ops::{Deref, DerefMut};
+use std::sync::atomic::Ordering::{Acquire, Release};
+use std::sync::atomic::AtomicBool;
+
+/// A "mutex" around a value, similar to `std::sync::Mutex<T>`.
+///
+/// This lock only supports the `try_lock` operation, however, and does not
+/// implement poisoning.
+pub struct Lock<T> {
+    locked: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+/// Sentinel representing an acquired lock through which the data can be
+/// accessed.
+pub struct TryLock<'a, T: 'a> {
+    __ptr: &'a Lock<T>,
+}
+
+// The `Lock` structure is basically just a `Mutex<T>`, and these two impls are
+// intended to mirror the standard library's corresponding impls for `Mutex<T>`.
+//
+// If a `T` is sendable across threads, so is the lock, and `T` must be sendable
+// across threads to be `Sync` because it allows mutable access from multiple
+// threads.
+unsafe impl<T: Send> Send for Lock<T> {}
+unsafe impl<T: Send> Sync for Lock<T> {}
+
+impl<T> Lock<T> {
+    /// Creates a new lock around the given value.
+    pub fn new(t: T) -> Lock<T> {
+        Lock {
+            locked: AtomicBool::new(false),
+            data: UnsafeCell::new(t),
+        }
+    }
+
+    /// Attempts to acquire this lock, returning whether the lock was acquired or
+    /// not.
+    ///
+    /// If `Some` is returned then the data this lock protects can be accessed
+    /// through the sentinel. This sentinel allows both mutable and immutable
+    /// access.
+    ///
+    /// If `None` is returned then the lock is already locked, either elsewhere
+    /// on this thread or on another thread.
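+    ///
+    /// A usage sketch (`Lock` is internal to this crate, so this is
+    /// illustrative only):
+    ///
+    /// ```ignore
+    /// let lock = Lock::new(0);
+    /// if let Some(mut guard) = lock.try_lock() {
+    ///     *guard += 1; // exclusive access while the guard is alive
+    /// }
+    /// // the lock is released when the guard is dropped
+    /// ```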
+    pub fn try_lock(&self) -> Option<TryLock<T>> {
+        if !self.locked.swap(true, Acquire) {
+            Some(TryLock { __ptr: self })
+        } else {
+            None
+        }
+    }
+}
+
+impl<'a, T> Deref for TryLock<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        // The existence of `TryLock` represents that we own the lock, so we
+        // can safely access the data here.
+        unsafe { &*self.__ptr.data.get() }
+    }
+}
+
+impl<'a, T> DerefMut for TryLock<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // The existence of `TryLock` represents that we own the lock, so we
+        // can safely access the data here.
+        //
+        // Additionally, we're the *only* `TryLock` in existence so mutable
+        // access should be ok.
+        unsafe { &mut *self.__ptr.data.get() }
+    }
+}
+
+impl<'a, T> Drop for TryLock<'a, T> {
+    fn drop(&mut self) {
+        self.__ptr.locked.store(false, Release);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Lock;
+
+    #[test]
+    fn smoke() {
+        let a = Lock::new(1);
+        let mut a1 = a.try_lock().unwrap();
+        assert!(a.try_lock().is_none());
+        assert_eq!(*a1, 1);
+        *a1 = 2;
+        drop(a1);
+        assert_eq!(*a.try_lock().unwrap(), 2);
+        assert_eq!(*a.try_lock().unwrap(), 2);
+    }
+}
diff --git a/futures-rs-test-all/src/map.rs b/futures-rs-test-all/src/map.rs
new file mode 100644
index 000000000..8a46486df
--- /dev/null
+++ b/futures-rs-test-all/src/map.rs
@@ -0,0 +1,43 @@
+use {Future, Task, Poll};
+use util::Collapsed;
+
+/// Future for the `map` combinator, changing the type of a future.
+///
+/// This is created by the `Future::map` method.
+pub struct Map<A, F> where A: Future {
+    future: Collapsed<A>,
+    f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> Map<A, F>
+    where A: Future,
+{
+    Map {
+        future: Collapsed::Start(future),
+        f: Some(f),
+    }
+}
+
+impl<U, A, F> Future for Map<A, F>
+    where A: Future,
+          F: FnOnce(A::Item) -> U + Send + 'static,
+          U: Send + 'static,
+{
+    type Item = U;
+    type Error = A::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<U, A::Error> {
+        let result = try_poll!(self.future.poll(task));
+        result.map(self.f.take().expect("cannot poll Map twice")).into()
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.future.schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        self.future.collapse();
+        None
+    }
+}
diff --git a/futures-rs-test-all/src/map_err.rs b/futures-rs-test-all/src/map_err.rs
new file mode 100644
index 000000000..a07f5a485
--- /dev/null
+++ b/futures-rs-test-all/src/map_err.rs
@@ -0,0 +1,43 @@
+use {Future, Task, Poll};
+use util::Collapsed;
+
+/// Future for the `map_err` combinator, changing the error type of a future.
+///
+/// This is created by the `Future::map_err` method.
+pub struct MapErr<A, F> where A: Future {
+    future: Collapsed<A>,
+    f: Option<F>,
+}
+
+pub fn new<A, F>(future: A, f: F) -> MapErr<A, F>
+    where A: Future
+{
+    MapErr {
+        future: Collapsed::Start(future),
+        f: Some(f),
+    }
+}
+
+impl<U, A, F> Future for MapErr<A, F>
+    where A: Future,
+          F: FnOnce(A::Error) -> U + Send + 'static,
+          U: Send + 'static,
+{
+    type Item = A::Item;
+    type Error = U;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<A::Item, U> {
+        let result = try_poll!(self.future.poll(task));
+        result.map_err(self.f.take().expect("cannot poll MapErr twice")).into()
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.future.schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        self.future.collapse();
+        None
+    }
+}
diff --git a/futures-rs-test-all/src/or_else.rs b/futures-rs-test-all/src/or_else.rs
new file mode 100644
index 000000000..55a2decdd
--- /dev/null
+++ b/futures-rs-test-all/src/or_else.rs
@@ -0,0 +1,47 @@
+use {Future, IntoFuture, Task, Poll};
+use chain::Chain;
+
+/// Future for the `or_else` combinator, chaining a computation onto the end of
+/// a future which fails with an error.
+///
+/// This is created by the `Future::or_else` method.
+pub struct OrElse<A, B, F> where A: Future, B: IntoFuture {
+    state: Chain<A, B::Future, F>,
+}
+
+pub fn new<A, B, F>(future: A, f: F) -> OrElse<A, B, F>
+    where A: Future,
+          B: IntoFuture<Item=A::Item>,
+          F: Send + 'static,
+{
+    OrElse {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for OrElse<A, B, F>
+    where A: Future,
+          B: IntoFuture<Item=A::Item>,
+          F: FnOnce(A::Error) -> B + Send + 'static,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<B::Item, B::Error> {
+        self.state.poll(task, |a, f| {
+            match a {
+                Ok(item) => Ok(Ok(item)),
+                Err(e) => Ok(Err(f(e).into_future()))
+            }
+        })
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.state.schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        self.state.tailcall()
+    }
+}
diff --git a/futures-rs-test-all/src/poll.rs b/futures-rs-test-all/src/poll.rs
new file mode 100644
index 000000000..8d78839d5
--- /dev/null
+++ b/futures-rs-test-all/src/poll.rs
@@ -0,0 +1,79 @@
+
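+/// Returns early with `Poll::NotReady` when the given expression is not ready,
+/// otherwise evaluates to the completed value as a `Result`.
+///
+/// A usage sketch (this is how the combinators in this crate use it):
+///
+/// ```ignore
+/// fn poll(&mut self, task: &mut Task) -> Poll<U, A::Error> {
+///     // Propagates `Poll::NotReady` to the caller; otherwise `result`
+///     // is a `Result` with the completed value or error.
+///     let result = try_poll!(self.future.poll(task));
+///     result.map(self.f.take().expect("cannot poll Map twice")).into()
+/// }
+/// ```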
+#[macro_export]
+macro_rules! try_poll {
+    ($e:expr) => (match $e {
+        $crate::Poll::NotReady => return $crate::Poll::NotReady,
+        $crate::Poll::Ok(t) => Ok(t),
+        $crate::Poll::Err(e) => Err(e),
+    })
+}
+
+/// Possible return values from the `Future::poll` method.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Poll<T, E> {
+    /// Indicates that the future is not ready yet, ask again later.
+    NotReady,
+
+    /// Indicates that the future has completed successfully, and this value is
+    /// what the future completed with.
+    Ok(T),
+
+    /// Indicates that the future has failed, and this error is what the future
+    /// failed with.
+    Err(E),
+}
+
+impl<T, E> Poll<T, E> {
+    /// Change the success type of this `Poll` value with the closure provided
+    pub fn map<F, U>(self, f: F) -> Poll<U, E>
+        where F: FnOnce(T) -> U
+    {
+        match self {
+            Poll::NotReady => Poll::NotReady,
+            Poll::Ok(t) => Poll::Ok(f(t)),
+            Poll::Err(e) => Poll::Err(e),
+        }
+    }
+
+    /// Change the error type of this `Poll` value with the closure provided
+    pub fn map_err<F, U>(self, f: F) -> Poll<T, U>
+        where F: FnOnce(E) -> U
+    {
+        match self {
+            Poll::NotReady => Poll::NotReady,
+            Poll::Ok(t) => Poll::Ok(t),
+            Poll::Err(e) => Poll::Err(f(e)),
+        }
+    }
+
+    /// Returns whether this is `Poll::NotReady`
+    pub fn is_not_ready(&self) -> bool {
+        match *self {
+            Poll::NotReady => true,
+            _ => false,
+        }
+    }
+
+    /// Returns whether this is either `Poll::Ok` or `Poll::Err`
+    pub fn is_ready(&self) -> bool {
+        !self.is_not_ready()
+    }
+
+    /// Unwraps this `Poll` into a `Result`, panicking if it's not ready.
+    pub fn unwrap(self) -> Result<T, E> {
+        match self {
+            Poll::Ok(t) => Ok(t),
+            Poll::Err(t) => Err(t),
+            Poll::NotReady => panic!("unwrapping a Poll that wasn't ready"),
+        }
+    }
+}
+
+impl<T, E> From<Result<T, E>> for Poll<T, E> {
+    fn from(r: Result<T, E>) -> Poll<T, E> {
+        match r {
+            Ok(t) => Poll::Ok(t),
+            Err(t) => Poll::Err(t),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/promise.rs b/futures-rs-test-all/src/promise.rs
new file mode 100644
index 000000000..e32809f10
--- /dev/null
+++ b/futures-rs-test-all/src/promise.rs
@@ -0,0 +1,155 @@
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+use {Future, Task, Poll};
+use slot::{Slot, Token};
+
+/// A future representing the completion of a computation happening elsewhere in
+/// memory.
+///
+/// This is created by the `promise` function.
+pub struct Promise<T>
+    where T: Send + 'static,
+{
+    inner: Arc<Inner<T>>,
+    cancel_token: Option<Token>,
+}
+
+/// Represents the completion half of a promise through which the result of a
+/// computation is signaled.
+///
+/// This is created by the `promise` function.
+pub struct Complete<T>
+    where T: Send + 'static,
+{
+    inner: Arc<Inner<T>>,
+    completed: bool,
+}
+
+struct Inner<T> {
+    slot: Slot<Option<T>>,
+    pending_wake: AtomicBool,
+}
+
+/// Creates a new in-memory promise used to represent completing a computation.
+///
+/// A promise in this library is a concrete implementation of the `Future` trait
+/// used to complete a computation from one location with a future representing
+/// what to do in another.
+///
+/// This function is similar to Rust's channels found in the standard library.
+/// Two halves are returned, the first of which is a `Complete` handle used to
+/// signal the end of a computation, and the second of which is a `Promise`
+/// that implements the `Future` trait and resolves to the completed value.
+///
+/// Each half can be separately owned and sent across threads.
+///
+/// # Examples
+///
+/// ```
+/// use futures::*;
+///
+/// let (c, p) = promise::<i32>();
+///
+/// p.map(|i| {
+///     println!("got: {}", i);
+/// }).forget();
+///
+/// c.complete(3);
+/// ```
+pub fn promise<T>() -> (Complete<T>, Promise<T>)
+    where T: Send + 'static,
+{
+    let inner = Arc::new(Inner {
+        slot: Slot::new(None),
+        pending_wake: AtomicBool::new(false),
+    });
+    let promise = Promise {
+        inner: inner.clone(),
+        cancel_token: None,
+    };
+    let complete = Complete {
+        inner: inner,
+        completed: false,
+    };
+    (complete, promise)
+}
+
+impl<T> Complete<T>
+    where T: Send + 'static,
+{
+    /// Completes this promise with a successful result.
+    ///
+    /// This function will consume `self` and indicate to the other end, the
+    /// `Promise`, that the value provided is the successful result of the
+    /// computation this represents.
+    pub fn complete(mut self, t: T) {
+        self.completed = true;
+        self.send(Some(t))
+    }
+
+    fn send(&mut self, t: Option<T>) {
+        if let Err(e) = self.inner.slot.try_produce(t) {
+            self.inner.slot.on_empty(|slot| {
+                slot.try_produce(e.into_inner()).ok()
+                    .expect("advertised as empty but wasn't");
+            });
+        }
+    }
+}
+
+impl<T> Drop for Complete<T>
+    where T: Send + 'static,
+{
+    fn drop(&mut self) {
+        if !self.completed {
+            self.send(None);
+        }
+    }
+}
+
+/// Error returned from a `Promise<T>` whenever the corresponding `Complete<T>`
+/// is dropped.
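+///
+/// A brief sketch of when this value is produced:
+///
+/// ```
+/// use futures::*;
+///
+/// let (c, p) = promise::<i32>();
+/// drop(c); // dropping the completion half cancels the promise
+///
+/// p.map_err(|e| {
+///     assert_eq!(e, Canceled);
+/// }).forget();
+/// ```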
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct Canceled;
+
+impl<T: Send + 'static> Future for Promise<T> {
+    type Item = T;
+    type Error = Canceled;
+
+    fn poll(&mut self, _: &mut Task) -> Poll<T, Canceled> {
+        if self.inner.pending_wake.load(Ordering::SeqCst) {
+            return Poll::NotReady
+        }
+        match self.inner.slot.try_consume() {
+            Ok(Some(e)) => Poll::Ok(e),
+            Ok(None) => Poll::Err(Canceled),
+            Err(_) => Poll::NotReady,
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
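+        // Only one `on_full` callback may be registered with the slot at a
+        // time, so if a previous call to `schedule` left one pending, cancel
+        // it before registering a new callback for the current task's handle.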
+        if self.inner.pending_wake.load(Ordering::SeqCst) {
+            if let Some(cancel_token) = self.cancel_token.take() {
+                self.inner.slot.cancel(cancel_token);
+            }
+        }
+        self.inner.pending_wake.store(true, Ordering::SeqCst);
+        let inner = self.inner.clone();
+        let handle = task.handle().clone();
+        self.cancel_token = Some(self.inner.slot.on_full(move |_| {
+            inner.pending_wake.store(false, Ordering::SeqCst);
+            handle.notify();
+        }));
+    }
+}
+
+impl<T> Drop for Promise<T>
+    where T: Send + 'static,
+{
+    fn drop(&mut self) {
+        if let Some(cancel_token) = self.cancel_token.take() {
+            self.inner.slot.cancel(cancel_token)
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/select.rs b/futures-rs-test-all/src/select.rs
new file mode 100644
index 000000000..22e981e70
--- /dev/null
+++ b/futures-rs-test-all/src/select.rs
@@ -0,0 +1,121 @@
+use std::mem;
+
+use {Future, Task, empty, Poll};
+use util::Collapsed;
+
+/// Future for the `select` combinator, waiting for one of two futures to
+/// complete.
+///
+/// This is created by the `Future::select` method.
+pub struct Select<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
+    inner: Option<(Collapsed<A>, Collapsed<B>)>,
+}
+
+/// Future yielded as the second result in a `Select` future.
+///
+/// This sentinel future represents the future in a `select` pair which did not
+/// finish first; it can be polled to completion on its own.
+pub struct SelectNext<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
+    inner: OneOf<A, B>,
+}
+
+enum OneOf<A, B> where A: Future, B: Future {
+    A(Collapsed<A>),
+    B(Collapsed<B>),
+}
+
+pub fn new<A, B>(a: A, b: B) -> Select<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>
+{
+    let a = Collapsed::Start(a);
+    let b = Collapsed::Start(b);
+    Select {
+        inner: Some((a, b)),
+    }
+}
+
+impl<A, B> Future for Select<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>,
+{
+    type Item = (A::Item, SelectNext<A, B>);
+    type Error = (A::Error, SelectNext<A, B>);
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        let (ret, is_a) = match self.inner {
+            Some((ref mut a, ref mut b)) => {
+                match a.poll(task) {
+                    Poll::Ok(a) => (Ok(a), true),
+                    Poll::Err(a) => (Err(a), true),
+                    Poll::NotReady => (try_poll!(b.poll(task)), false),
+                }
+            }
+            None => panic!("cannot poll select twice"),
+        };
+
+        let (a, b) = self.inner.take().unwrap();
+        let next = if is_a {OneOf::B(b)} else {OneOf::A(a)};
+        let next = SelectNext { inner: next };
+        match ret {
+            Ok(a) => Poll::Ok((a, next)),
+            Err(e) => Poll::Err((e, next)),
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.inner {
+            Some((ref mut a, ref mut b)) => {
+                a.schedule(task);
+                b.schedule(task);
+            }
+            None => task.notify(),
+        }
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        if let Some((ref mut a, ref mut b)) = self.inner {
+            a.collapse();
+            b.collapse();
+        }
+        None
+    }
+}
+
+impl<A, B> Future for SelectNext<A, B>
+    where A: Future,
+          B: Future<Item=A::Item, Error=A::Error>,
+{
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        match self.inner {
+            OneOf::A(ref mut a) => a.poll(task),
+            OneOf::B(ref mut b) => b.poll(task),
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.inner {
+            OneOf::A(ref mut a) => a.schedule(task),
+            OneOf::B(ref mut b) => b.schedule(task),
+        }
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        match self.inner {
+            OneOf::A(ref mut a) => a.collapse(),
+            OneOf::B(ref mut b) => b.collapse(),
+        }
+        match self.inner {
+            OneOf::A(Collapsed::Tail(ref mut a)) |
+            OneOf::B(Collapsed::Tail(ref mut a)) => {
+                Some(mem::replace(a, Box::new(empty())))
+            }
+            _ => None,
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/select_all.rs b/futures-rs-test-all/src/select_all.rs
new file mode 100644
index 000000000..9cf10dd38
--- /dev/null
+++ b/futures-rs-test-all/src/select_all.rs
@@ -0,0 +1,113 @@
+use std::mem;
+
+use {Future, IntoFuture, Task, empty, Poll};
+use util::Collapsed;
+
+/// Future for the `select_all` combinator, waiting for any one of a list of
+/// futures to complete.
+///
+/// This is created by the `select_all` function.
+pub struct SelectAll<A> where A: Future {
+    inner: Vec<SelectAllNext<A>>,
+}
+
+/// Future yielded as part of the result of a `SelectAll` future.
+///
+/// This sentinel future wraps one of the remaining futures from the original
+/// list which has not yet completed; it can be polled to completion on its own.
+pub struct SelectAllNext<A> where A: Future {
+    inner: Collapsed<A>,
+}
+
+/// Creates a new future which will select over a list of futures.
+///
+/// The returned future will wait for any future within `list` to be ready. Upon
+/// completion or failure the item resolved will be returned, along with the
+/// index of the future that was ready and the list of all the remaining
+/// futures.
+///
+/// # Panics
+///
+/// This function will panic if the iterator specified contains no items.
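+///
+/// # Examples
+///
+/// A brief illustrative sketch:
+///
+/// ```
+/// use futures::*;
+///
+/// let futures = vec![
+///     finished::<u32, u32>(1),
+///     finished::<u32, u32>(2),
+/// ];
+///
+/// select_all(futures).map(|(value, index, rest)| {
+///     println!("future {} finished first with {}", index, value);
+///     assert_eq!(rest.len(), 1);
+/// }).forget();
+/// ```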
+pub fn select_all<I>(iter: I) -> SelectAll<<I::Item as IntoFuture>::Future>
+    where I: IntoIterator,
+          I::Item: IntoFuture,
+{
+    let ret = SelectAll {
+        inner: iter.into_iter()
+                   .map(|a| a.into_future())
+                   .map(Collapsed::Start)
+                   .map(|a| SelectAllNext { inner: a })
+                   .collect(),
+    };
+    assert!(ret.inner.len() > 0);
+    return ret
+}
+
+impl<A> Future for SelectAll<A>
+    where A: Future,
+{
+    type Item = (A::Item, usize, Vec<SelectAllNext<A>>);
+    type Error = (A::Error, usize, Vec<SelectAllNext<A>>);
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| {
+            match f.poll(task) {
+                Poll::NotReady => None,
+                Poll::Ok(e) => Some((i, Ok(e))),
+                Poll::Err(e) => Some((i, Err(e))),
+            }
+        }).next();
+        match item {
+            Some((idx, res)) => {
+                self.inner.remove(idx);
+                let rest = mem::replace(&mut self.inner, Vec::new());
+                match res {
+                    Ok(e) => Poll::Ok((e, idx, rest)),
+                    Err(e) => Poll::Err((e, idx, rest)),
+                }
+            }
+            None => Poll::NotReady,
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        for f in self.inner.iter_mut() {
+            f.inner.schedule(task);
+        }
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        for f in self.inner.iter_mut() {
+            f.inner.collapse();
+        }
+        None
+    }
+}
+
+impl<A> Future for SelectAllNext<A>
+    where A: Future,
+{
+    type Item = A::Item;
+    type Error = A::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        self.inner.poll(task)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.inner.schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        self.inner.collapse();
+        match self.inner {
+            Collapsed::Tail(ref mut a) => {
+                Some(mem::replace(a, Box::new(empty())))
+            }
+            _ => None,
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/slot.rs b/futures-rs-test-all/src/slot.rs
new file mode 100644
index 000000000..8d5e900e5
--- /dev/null
+++ b/futures-rs-test-all/src/slot.rs
@@ -0,0 +1,671 @@
+//! A slot in memory for communicating between a producer and a consumer.
+//!
+//! This module contains an implementation detail of this library for a type
+//! which is only intended to be shared between one consumer and one producer of
+//! a value. It is unlikely that this module will survive stabilization of this
+//! library, so it is not recommended to rely on it.
+
+#![allow(dead_code)] // imported in a few places
+
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use lock::Lock;
+
+/// A slot in memory intended to represent the communication channel between one
+/// producer and one consumer.
+///
+/// Each slot contains space for a piece of data of type `T`, and can have
+/// callbacks registered to run when the slot is either full or empty.
+///
+/// Slots are only intended to be shared between exactly one producer and
+/// exactly one consumer. If there are multiple concurrent producers or
+/// consumers then this is still memory safe but will have unpredictable results
+/// (and maybe panics). Note that this does not require that the "consumer" is
+/// the same for the entire lifetime of a slot, simply that there is only one
+/// consumer at a time.
+///
+/// # Registering callbacks
+///
+/// [`on_empty`](#method.on_empty) registers a callback to run when the slot
+/// becomes empty, and [`on_full`](#method.on_full) registers one to run when it
+/// becomes full. In both cases, the callback will run immediately if possible.
+///
+/// At most one callback can be registered at any given time: it is an error to
+/// attempt to register a callback with `on_full` if one is currently registered
+/// via `on_empty`, or any other combination.
+///
+/// # Cancellation
+///
+/// Registering a callback returns a `Token` which can be used to
+/// [`cancel`](#method.cancel) the callback. Only callbacks that have not yet
+/// started running can be canceled. Canceling a callback that has already run
+/// is not an error, and `cancel` does not signal to the caller whether or not
+/// the callback was actually canceled.
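+///
+/// # Examples
+///
+/// A small sketch of the producer/consumer handshake (`Slot` is an internal
+/// type, so this is illustrative only):
+///
+/// ```ignore
+/// let slot: Slot<i32> = Slot::new(None);
+///
+/// // Consumer side: run a callback once a value has been produced.
+/// slot.on_full(|slot| {
+///     let value = slot.try_consume().expect("advertised as full but wasn't");
+///     println!("got: {}", value);
+/// });
+///
+/// // Producer side: store a value, which fires the callback above.
+/// slot.try_produce(42).unwrap();
+/// ```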
+pub struct Slot<T> {
+    // The purpose of this data type is to communicate when a value becomes
+    // available and coordinate between a producer and consumer about that
+    // value. Slots end up being at the core of many futures as they handle
+    // values transferring between producers and consumers, which means that
+    // they can never block.
+    //
+    // As a result, this `Slot` is a lock-free implementation in terms of not
+    // actually blocking at any point in time. The `Lock` types aren't actually
+    // mutexes, as they only support a `try_lock` operation, and all the
+    // methods below ensure that progress can always be made without blocking.
+    //
+    // The `state` variable keeps track of the state of this slot, while the
+    // other fields here are just the payloads of the slot itself. Note that the
+    // exact bits of `state` are typically wrapped up in a `State` for
+    // inspection (see below).
+    state: AtomicUsize,
+    slot: Lock<Option<T>>,
+    on_full: Lock<Option<Box<FnBox<T>>>>,
+    on_empty: Lock<Option<Box<FnBox<T>>>>,
+}
+
+/// Error value returned from erroneous calls to `try_produce`, which contains
+/// the value that was passed to `try_produce`.
+#[derive(Debug, PartialEq)]
+pub struct TryProduceError<T>(T);
+
+/// Error value returned from erroneous calls to `try_consume`.
+#[derive(Debug, PartialEq)]
+pub struct TryConsumeError(());
+
+/// Error value returned from erroneous calls to `on_full`.
+#[derive(Debug, PartialEq)]
+pub struct OnFullError(());
+
+/// Error value returned from erroneous calls to `on_empty`.
+#[derive(Debug, PartialEq)]
+pub struct OnEmptyError(());
+
+/// A `Token` represents a registered callback, and can be used to cancel the callback.
+#[derive(Clone, Copy)]
+pub struct Token(usize);
+
+// Slot state: the lowest 3 bits are flags; the remaining bits are used to
+// store the `Token` for the currently registered callback. The special token
+// value 0 means no callback is registered.
+//
+// The flags are:
+//   - `DATA`: the `Slot` contains a value
+//   - `ON_FULL`: the `Slot` has an `on_full` callback registered
+//   - `ON_EMPTY`: the `Slot` has an `on_empty` callback registered
+struct State(usize);
+
+const DATA: usize = 1 << 0;
+const ON_FULL: usize = 1 << 1;
+const ON_EMPTY: usize = 1 << 2;
+const STATE_BITS: usize = 3;
+const STATE_MASK: usize = (1 << STATE_BITS) - 1;
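+
+// As a worked example (values chosen purely for illustration): a raw state of
+// `(1 << STATE_BITS) | ON_FULL` decodes to flags `ON_FULL` (an `on_full`
+// callback is registered, no data yet) and token `1`, i.e. the currently
+// registered callback was handed out as `Token(1)`.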
+
+fn _is_send<T: Send>() {}
+fn _is_sync<T: Sync>() {}
+
+fn _assert() {
+    _is_send::<Slot<i32>>();
+    _is_sync::<Slot<u32>>();
+}
+
+impl<T: 'static> Slot<T> {
+    /// Creates a new `Slot` containing `val`, which may be `None` to create an
+    /// empty `Slot`.
+    pub fn new(val: Option<T>) -> Slot<T> {
+        Slot {
+            state: AtomicUsize::new(if val.is_some() {DATA} else {0}),
+            slot: Lock::new(val),
+            on_full: Lock::new(None),
+            on_empty: Lock::new(None),
+        }
+    }
+
+    /// Attempts to store `t` in the slot.
+    ///
+    /// This method can only be called by the one producer working on this
+    /// `Slot`. Concurrent calls to this method or `on_empty` will result in
+    /// panics or possibly errors.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if the slot is already full. The value you attempted to
+    /// store is included in the error value.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if called concurrently with another call to
+    /// `try_produce` or with `on_empty`, or if `on_empty` has been called
+    /// previously but its callback hasn't fired yet.
+    pub fn try_produce(&self, t: T) -> Result<(), TryProduceError<T>> {
+        // First up, let's take a look at our current state. Of our three
+        // flags:
+        //
+        // * DATA - if this is set, production fails: a value has already been
+        //          produced and the consumer hasn't taken it yet.
+        // * ON_EMPTY - this should never be set; it would be a contract
+        //              violation, as the producer already registered interest
+        //              in the slot becoming empty but that callback hasn't
+        //              fired.
+        // * ON_FULL - doesn't matter here; we don't check it, as either state
+        //             is valid.
+        let mut state = State(self.state.load(Ordering::SeqCst));
+        assert!(!state.flag(ON_EMPTY));
+        if state.flag(DATA) {
+            return Err(TryProduceError(t))
+        }
+
+        // Ok, so we've determined that our state is either `ON_FULL` or `0`, in
+        // both cases we're going to store our data into our slot. This should
+        // always succeed as access to `slot` is gated on the `DATA` flag being
+        // set on the consumer side (which isn't set) and there should only be
+        // one producer.
+        let mut slot = self.slot.try_lock().expect("interference with consumer?");
+        assert!(slot.is_none());
+        *slot = Some(t);
+        drop(slot);
+
+        // Next, we update our state with `DATA` to say that something is
+        // available, and we also unset `ON_FULL` because we'll invoke the
+        // callback if it's available.
+        loop {
+            assert!(!state.flag(ON_EMPTY));
+            let new_state = state.set_flag(DATA, true).set_flag(ON_FULL, false);
+            let old = self.state.compare_and_swap(state.0,
+                                                  new_state.0,
+                                                  Ordering::SeqCst);
+            if old == state.0 {
+                break
+            }
+            state.0 = old;
+        }
+
+        // If the state we transitioned from had an on-full callback
+        // registered, we call that callback here. There are a few unwraps
+        // here that should never fail: the consumer shouldn't be placing
+        // another callback here, and there shouldn't be any other producers.
+        if state.flag(ON_FULL) {
+            let cb = self.on_full.try_lock().expect("interference2")
+                                 .take().expect("ON_FULL but no callback");
+            cb.call_box(self);
+        }
+        Ok(())
+    }
+
+    /// Registers `f` as a callback to run when the slot becomes empty.
+    ///
+    /// The callback will run immediately if the slot is already empty. Returns
+    /// a token that can be used to cancel the callback. This method is to be
+    /// called by the producer, and it is illegal to call this method
+    /// concurrently with either `on_empty` or `try_produce`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if another callback was already registered via `on_empty` or
+    /// `on_full`, or if this method is called concurrently with other
+    /// producer methods.
+    pub fn on_empty<F>(&self, f: F) -> Token
+        where F: FnOnce(&Slot<T>) + Send + 'static
+    {
+        // First up, as usual, take a look at our state. Of the three flags:
+        //
+        // * DATA - if set, we keep going; if unset, the slot is already empty
+        //          so we run the callback immediately and are done.
+        // * ON_EMPTY - this should be impossible, as it's a contract violation
+        //              to call this method twice or concurrently.
+        // * ON_FULL - it's illegal to have both an empty and a full callback
+        //             registered simultaneously, so we check this just after
+        //             ensuring there's data available. If there's data there
+        //             should not be a full callback, as it should already have
+        //             been called.
+        let mut state = State(self.state.load(Ordering::SeqCst));
+        assert!(!state.flag(ON_EMPTY));
+        if !state.flag(DATA) {
+            f(self);
+            return Token(0)
+        }
+        assert!(!state.flag(ON_FULL));
+
+        // At this point we've determined that our state is exactly `DATA`,
+        // with all other flags unset. We're clear to initialize the
+        // `on_empty` slot, so we store our callback there.
+        let mut slot = self.on_empty.try_lock().expect("on_empty interference");
+        assert!(slot.is_none());
+        *slot = Some(Box::new(f));
+        drop(slot);
+
+        // In this loop we transition from the `DATA` state to a state with
+        // the `ON_EMPTY` flag set. Note that we also bump the token, as we're
+        // registering a new callback.
+        loop {
+            assert!(state.flag(DATA));
+            assert!(!state.flag(ON_FULL));
+            assert!(!state.flag(ON_EMPTY));
+            let new_state = state.set_flag(ON_EMPTY, true)
+                                 .set_token(state.token() + 1);
+            let old = self.state.compare_and_swap(state.0,
+                                                  new_state.0,
+                                                  Ordering::SeqCst);
+
+            // If we succeeded in the CAS, then we're done and our token is
+            // valid.
+            if old == state.0 {
+                return Token(new_state.token())
+            }
+            state.0 = old;
+
+            // If we failed the CAS but the data was taken in the meantime, we
+            // abort our attempt to set ON_EMPTY and call the callback
+            // immediately. Note that the ON_EMPTY flag was never set, so the
+            // callback we just stored is still in place and available to take.
+            if !state.flag(DATA) {
+                let cb = self.on_empty.try_lock().expect("on_empty interference2")
+                                      .take().expect("on_empty not empty??");
+                cb.call_box(self);
+                return Token(0)
+            }
+        }
+    }
+
+    /// Attempts to consume the value stored in the slot.
+    ///
+    /// This method can only be called by the one consumer of this slot, and
+    /// cannot be called concurrently with `try_consume` or `on_full`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `Err` if the slot is already empty.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic, or otherwise behave unpredictably, if called
+    /// concurrently with `try_consume` or `on_full`.
+    pub fn try_consume(&self) -> Result<T, TryConsumeError> {
+        // The implementation of this method is basically the same as
+        // `try_produce` above; it just performs the inverse of each operation.
+        let mut state = State(self.state.load(Ordering::SeqCst));
+        assert!(!state.flag(ON_FULL));
+        if !state.flag(DATA) {
+            return Err(TryConsumeError(()))
+        }
+        let mut slot = self.slot.try_lock().expect("interference with producer?");
+        let val = slot.take().expect("DATA but not data");
+        drop(slot);
+
+        loop {
+            assert!(!state.flag(ON_FULL));
+            let new_state = state.set_flag(DATA, false).set_flag(ON_EMPTY, false);
+            let old = self.state.compare_and_swap(state.0,
+                                                  new_state.0,
+                                                  Ordering::SeqCst);
+            if old == state.0 {
+                break
+            }
+            state.0 = old;
+        }
+        assert!(!state.flag(ON_FULL));
+        if state.flag(ON_EMPTY) {
+            let cb = self.on_empty.try_lock().expect("interference3")
+                                  .take().expect("ON_EMPTY but no callback");
+            cb.call_box(self);
+        }
+        Ok(val)
+    }
+
+    /// Registers `f` as a callback to run when the slot becomes full.
+    ///
+    /// The callback will run immediately if the slot is already full. Returns a
+    /// token that can be used to cancel the callback.
+    ///
+    /// This method is to be called by the consumer.
+    ///
+    /// # Panics
+    ///
+    /// Panics if another callback was already registered via `on_empty` or
+    /// `on_full` or if called concurrently with `on_full` or `try_consume`.
+    pub fn on_full<F>(&self, f: F) -> Token
+        where F: FnOnce(&Slot<T>) + Send + 'static
+    {
+        // The implementation of this method is basically the same as
+        // `on_empty` above; it just performs the inverse of each operation.
+        let mut state = State(self.state.load(Ordering::SeqCst));
+        assert!(!state.flag(ON_FULL));
+        if state.flag(DATA) {
+            f(self);
+            return Token(0)
+        }
+        assert!(!state.flag(ON_EMPTY));
+
+        let mut slot = self.on_full.try_lock().expect("on_full interference");
+        assert!(slot.is_none());
+        *slot = Some(Box::new(f));
+        drop(slot);
+
+        loop {
+            assert!(!state.flag(DATA));
+            assert!(!state.flag(ON_EMPTY));
+            assert!(!state.flag(ON_FULL));
+            let new_state = state.set_flag(ON_FULL, true)
+                                 .set_token(state.token() + 1);
+            let old = self.state.compare_and_swap(state.0,
+                                                  new_state.0,
+                                                  Ordering::SeqCst);
+            if old == state.0 {
+                return Token(new_state.token())
+            }
+            state.0 = old;
+
+            if state.flag(DATA) {
+                let cb = self.on_full.try_lock().expect("on_full interference2")
+                                      .take().expect("on_full not full??");
+                cb.call_box(self);
+                return Token(0)
+            }
+        }
+    }
+
+    /// Cancels the callback associated with `token`.
+    ///
+    /// Canceling a callback that has already started running, or has already
+    /// run, does nothing and is not an error. See
+    /// [Cancellation](#cancellation).
+    ///
+    /// # Panics
+    ///
+    /// This method may cause panics if it is called concurrently with
+    /// `on_empty` or `on_full`, depending on which callback is being canceled.
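+    ///
+    /// # Example
+    ///
+    /// A minimal sketch mirroring the `cancel` test below: a canceled
+    /// `on_full` callback is never invoked.
+    ///
+    /// ```ignore
+    /// let slot = Slot::new(None);
+    /// let token = slot.on_full(|_slot| panic!("canceled, should never run"));
+    /// slot.cancel(token);
+    ///
+    /// // Producing a value no longer triggers the callback.
+    /// slot.try_produce(1).unwrap();
+    /// assert_eq!(slot.try_consume(), Ok(1));
+    /// ```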
+    pub fn cancel(&self, token: Token) {
+        // Tokens with a value of "0" are sentinels which don't actually do
+        // anything.
+        let token = token.0;
+        if token == 0 {
+            return
+        }
+
+        let mut state = State(self.state.load(Ordering::SeqCst));
+        loop {
+            // If we've moved on to a different token, then we're guaranteed
+            // that our token won't show up again, so we can return immediately
+            // as our closure has likely already run (or been previously
+            // canceled).
+            if state.token() != token {
+                return
+            }
+
+            // If our token matches, then let's see whether we're canceling
+            // the on-full or the on-empty callback. It's illegal to have them
+            // both registered, so at most one of the flags can be set.
+            //
+            // If neither is set then the callback has probably already run,
+            // so there's nothing to cancel and we simply return.
+            let new_state = if state.flag(ON_FULL) {
+                assert!(!state.flag(ON_EMPTY));
+                state.set_flag(ON_FULL, false)
+            } else if state.flag(ON_EMPTY) {
+                assert!(!state.flag(ON_FULL));
+                state.set_flag(ON_EMPTY, false)
+            } else {
+                return
+            };
+            let old = self.state.compare_and_swap(state.0,
+                                                  new_state.0,
+                                                  Ordering::SeqCst);
+            if old == state.0 {
+                break
+            }
+            state.0 = old;
+        }
+
+        // Now that the flag is unset we own the callback, so figure out which
+        // one we just canceled and drop it to clear it out.
+
+        if state.flag(ON_FULL) {
+            let cb = self.on_full.try_lock().expect("on_full interference3")
+                                 .take().expect("on_full not full??");
+            drop(cb);
+        } else {
+            let cb = self.on_empty.try_lock().expect("on_empty interference3")
+                                  .take().expect("on_empty not empty??");
+            drop(cb);
+        }
+    }
+}
+
+impl<T> TryProduceError<T> {
+    /// Extracts the value that was attempted to be produced.
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+trait FnBox<T: 'static>: Send + 'static {
+    fn call_box(self: Box<Self>, other: &Slot<T>);
+}
+
+impl<T, F> FnBox<T> for F
+    where F: FnOnce(&Slot<T>) + Send + 'static,
+          T: 'static,
+{
+    fn call_box(self: Box<F>, other: &Slot<T>) {
+        (*self)(other)
+    }
+}
+
+impl State {
+    fn flag(&self, f: usize) -> bool {
+        self.0 & f != 0
+    }
+
+    fn set_flag(&self, f: usize, val: bool) -> State {
+        State(if val {
+            self.0 | f
+        } else {
+            self.0 & !f
+        })
+    }
+
+    fn token(&self) -> usize {
+        self.0 >> STATE_BITS
+    }
+
+    fn set_token(&self, gen: usize) -> State {
+        State((gen << STATE_BITS) | (self.0 & STATE_MASK))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::thread;
+
+    use super::Slot;
+
+    #[test]
+    fn sequential() {
+        let slot = Slot::new(Some(1));
+
+        // We can consume once
+        assert_eq!(slot.try_consume(), Ok(1));
+        assert!(slot.try_consume().is_err());
+
+        // Consume a production
+        assert_eq!(slot.try_produce(2), Ok(()));
+        assert_eq!(slot.try_consume(), Ok(2));
+
+        // Can't produce twice
+        assert_eq!(slot.try_produce(3), Ok(()));
+        assert!(slot.try_produce(3).is_err());
+
+        // on_full is run immediately if full
+        let hit = Arc::new(AtomicUsize::new(0));
+        let hit2 = hit.clone();
+        slot.on_full(move |_s| {
+            hit2.fetch_add(1, Ordering::SeqCst);
+        });
+        assert_eq!(hit.load(Ordering::SeqCst), 1);
+
+        // on_full can be run twice, and we can consume in the callback
+        let hit2 = hit.clone();
+        slot.on_full(move |s| {
+            hit2.fetch_add(1, Ordering::SeqCst);
+            assert_eq!(s.try_consume(), Ok(3));
+        });
+        assert_eq!(hit.load(Ordering::SeqCst), 2);
+
+        // Production can't run a previous callback
+        assert_eq!(slot.try_produce(4), Ok(()));
+        assert_eq!(hit.load(Ordering::SeqCst), 2);
+        assert_eq!(slot.try_consume(), Ok(4));
+
+        // Productions run new callbacks
+        let hit2 = hit.clone();
+        slot.on_full(move |s| {
+            hit2.fetch_add(1, Ordering::SeqCst);
+            assert_eq!(s.try_consume(), Ok(5));
+        });
+        assert_eq!(slot.try_produce(5), Ok(()));
+        assert_eq!(hit.load(Ordering::SeqCst), 3);
+
+        // on_empty should fire immediately for an empty slot
+        let hit2 = hit.clone();
+        slot.on_empty(move |_| {
+            hit2.fetch_add(1, Ordering::SeqCst);
+        });
+        assert_eq!(hit.load(Ordering::SeqCst), 4);
+    }
+
+    #[test]
+    fn channel() {
+        const N: usize = 10000;
+
+        struct Sender {
+            slot: Arc<Slot<usize>>,
+            hit: Arc<AtomicUsize>,
+        }
+
+        struct Receiver {
+            slot: Arc<Slot<usize>>,
+            hit: Arc<AtomicUsize>,
+        }
+
+        impl Sender {
+            fn send(&self, val: usize) {
+                if self.slot.try_produce(val).is_ok() {
+                    return
+                }
+                let me = thread::current();
+                self.hit.store(0, Ordering::SeqCst);
+                let hit = self.hit.clone();
+                self.slot.on_empty(move |_slot| {
+                    hit.store(1, Ordering::SeqCst);
+                    me.unpark();
+                });
+                while self.hit.load(Ordering::SeqCst) == 0 {
+                    thread::park();
+                }
+                self.slot.try_produce(val).expect("can't produce after on_empty")
+            }
+        }
+
+        impl Receiver {
+            fn recv(&self) -> usize {
+                if let Ok(i) = self.slot.try_consume() {
+                    return i
+                }
+
+                let me = thread::current();
+                self.hit.store(0, Ordering::SeqCst);
+                let hit = self.hit.clone();
+                self.slot.on_full(move |_slot| {
+                    hit.store(1, Ordering::SeqCst);
+                    me.unpark();
+                });
+                while self.hit.load(Ordering::SeqCst) == 0 {
+                    thread::park();
+                }
+                self.slot.try_consume().expect("can't consume after on_full")
+            }
+        }
+
+        let slot = Arc::new(Slot::new(None));
+        let slot2 = slot.clone();
+
+        let tx = Sender { slot: slot2, hit: Arc::new(AtomicUsize::new(0)) };
+        let rx = Receiver { slot: slot, hit: Arc::new(AtomicUsize::new(0)) };
+
+        let a = thread::spawn(move || {
+            for i in 0..N {
+                assert_eq!(rx.recv(), i);
+            }
+        });
+
+        for i in 0..N {
+            tx.send(i);
+        }
+
+        a.join().unwrap();
+    }
+
+    #[test]
+    fn cancel() {
+        let slot = Slot::new(None);
+        let hits = Arc::new(AtomicUsize::new(0));
+
+        let add = || {
+            let hits = hits.clone();
+            move |_: &Slot<u32>| { hits.fetch_add(1, Ordering::SeqCst); }
+        };
+
+        // cancel on_full
+        let n = hits.load(Ordering::SeqCst);
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        let token = slot.on_full(add());
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        slot.cancel(token);
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        assert!(slot.try_consume().is_err());
+        assert!(slot.try_produce(1).is_ok());
+        assert!(slot.try_consume().is_ok());
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+
+        // cancel on_empty
+        let n = hits.load(Ordering::SeqCst);
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        slot.try_produce(1).unwrap();
+        let token = slot.on_empty(add());
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        slot.cancel(token);
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        assert!(slot.try_produce(1).is_err());
+
+        // cancel with no effect
+        let n = hits.load(Ordering::SeqCst);
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        let token = slot.on_full(add());
+        assert_eq!(hits.load(Ordering::SeqCst), n + 1);
+        slot.cancel(token);
+        assert_eq!(hits.load(Ordering::SeqCst), n + 1);
+        assert!(slot.try_consume().is_ok());
+        let token = slot.on_empty(add());
+        assert_eq!(hits.load(Ordering::SeqCst), n + 2);
+        slot.cancel(token);
+        assert_eq!(hits.load(Ordering::SeqCst), n + 2);
+
+        // canceling stale tokens has no effect
+        let n = hits.load(Ordering::SeqCst);
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        let token1 = slot.on_full(add());
+        assert_eq!(hits.load(Ordering::SeqCst), n);
+        assert!(slot.try_produce(1).is_ok());
+        assert_eq!(hits.load(Ordering::SeqCst), n + 1);
+        assert!(slot.try_consume().is_ok());
+        assert_eq!(hits.load(Ordering::SeqCst), n + 1);
+        let token2 = slot.on_full(add());
+        assert_eq!(hits.load(Ordering::SeqCst), n + 1);
+        slot.cancel(token1);
+        assert_eq!(hits.load(Ordering::SeqCst), n + 1);
+        slot.cancel(token2);
+        assert_eq!(hits.load(Ordering::SeqCst), n + 1);
+    }
+}
diff --git a/futures-rs-test-all/src/store.rs b/futures-rs-test-all/src/store.rs
new file mode 100644
index 000000000..5d9e8c143
--- /dev/null
+++ b/futures-rs-test-all/src/store.rs
@@ -0,0 +1,37 @@
+use std::marker;
+use std::any::Any;
+
+use {Task, TaskData, Poll, Future};
+
+/// A combinator which will store some data into task-local storage.
+///
+/// This combinator is created by the `futures::store` method.
+pub struct Store<T: Send + 'static, E> {
+    item: Option<T>,
+    _marker: marker::PhantomData<fn() -> E>,
+}
+
+/// A combinator to store some data into task-local storage.
+pub fn store<T, E>(t: T) -> Store<T, E>
+    where T: Any + Send + 'static,
+          E: Send + 'static,
+{
+    Store { item: Some(t), _marker: marker::PhantomData }
+}
+
+impl<T, E> Future for Store<T, E>
+    where T: Any + Send + 'static,
+          E: Send + 'static,
+{
+    type Item = TaskData<T>;
+    type Error = E;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<TaskData<T>, E> {
+        let item = self.item.take().expect("cannot poll Store twice");
+        Poll::Ok(task.insert(item))
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        task.notify()
+    }
+}
diff --git a/futures-rs-test-all/src/stream/and_then.rs b/futures-rs-test-all/src/stream/and_then.rs
new file mode 100644
index 000000000..49a2d6f80
--- /dev/null
+++ b/futures-rs-test-all/src/stream/and_then.rs
@@ -0,0 +1,59 @@
+use {Task, IntoFuture, Future, Poll};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto values produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::and_then` method.
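+///
+/// A construction-only sketch (assuming the crate-level `finished` constructor
+/// for an already-completed future and the `iter` constructor from this
+/// crate's `stream` module; the resulting stream still needs to be driven by a
+/// task):
+///
+/// ```ignore
+/// let doubled = iter(vec![1, 2, 3].into_iter().map(Ok::<i32, ()>))
+///     .and_then(|x| finished::<i32, ()>(x * 2));
+/// ```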
+pub struct AndThen<S, F, U>
+    where U: IntoFuture,
+{
+    stream: S,
+    future: Option<U::Future>,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> AndThen<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Item) -> U + Send + 'static,
+          U: IntoFuture<Error=S::Error>,
+{
+    AndThen {
+        stream: s,
+        future: None,
+        f: f,
+    }
+}
+
+impl<S, F, U> Stream for AndThen<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Item) -> U + Send + 'static,
+          U: IntoFuture<Error=S::Error>,
+{
+    type Item = U::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<U::Item>, S::Error> {
+        if self.future.is_none() {
+            let item = match try_poll!(self.stream.poll(task)) {
+                Ok(None) => return Poll::Ok(None),
+                Ok(Some(e)) => e,
+                Err(e) => return Poll::Err(e),
+            };
+            self.future = Some((self.f)(item).into_future());
+        }
+        assert!(self.future.is_some());
+        let res = self.future.as_mut().unwrap().poll(task);
+        if res.is_ready() {
+            self.future = None;
+        }
+        res.map(Some)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.future {
+            Some(ref mut s) => s.schedule(task),
+            None => self.stream.schedule(task),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/buffered.rs b/futures-rs-test-all/src/stream/buffered.rs
new file mode 100644
index 000000000..0cf19d3fe
--- /dev/null
+++ b/futures-rs-test-all/src/stream/buffered.rs
@@ -0,0 +1,99 @@
+use {Task, IntoFuture, Poll};
+use stream::{Stream, Fuse};
+use util::Collapsed;
+
+/// An adaptor for a stream of futures to execute the futures concurrently, if
+/// possible.
+///
+/// This adaptor will buffer up a list of pending futures, and then return their
+/// results in the order that they're finished. This is created by the
+/// `Stream::buffered` method.
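+///
+/// A construction-only sketch (`stream_of_futures` is a placeholder for any
+/// stream whose items implement `IntoFuture` with a matching error type):
+///
+/// ```ignore
+/// // Keep up to four of the yielded futures in flight at once.
+/// let buffered = stream_of_futures.buffered(4);
+/// ```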
+pub struct Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture,
+{
+    stream: Fuse<S>,
+    futures: Vec<Option<Collapsed<<S::Item as IntoFuture>::Future>>>,
+}
+
+pub fn new<S>(s: S, amt: usize) -> Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    Buffered {
+        stream: super::fuse::new(s),
+        futures: (0..amt).map(|_| None).collect(),
+    }
+}
+
+impl<S> Stream for Buffered<S>
+    where S: Stream,
+          S::Item: IntoFuture<Error=<S as Stream>::Error>,
+{
+    type Item = <S::Item as IntoFuture>::Item;
+    type Error = <S as Stream>::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<Self::Item>, Self::Error> {
+        let mut any_some = false;
+        for f in self.futures.iter_mut() {
+            // First, if this slot is empty, try to fill it in by polling the
+            // underlying stream for the next future.
+            if f.is_none() {
+                match self.stream.poll(task) {
+                    Poll::Ok(Some(e)) => {
+                        *f = Some(Collapsed::Start(e.into_future()));
+                    }
+                    Poll::Err(e) => return Poll::Err(e),
+                    Poll::Ok(None) |
+                    Poll::NotReady => continue,
+                }
+            }
+
+            // If we're here then our slot is full, so we unwrap it and poll it.
+            let ret = {
+                let future = f.as_mut().unwrap();
+                match future.poll(task) {
+                    Poll::Ok(e) => Poll::Ok(Some(e)),
+                    Poll::Err(e) => Poll::Err(e),
+
+                    // TODO: should this happen here or elsewhere?
+                    Poll::NotReady => {
+                        future.collapse();
+                        any_some = true;
+                        continue
+                    }
+                }
+            };
+
+            // Ok, that future is done, so we chuck it out and return its value.
+            // Next time we're poll()'d it'll get filled in again.
+            *f = None;
+            return ret
+        }
+
+        if any_some || !self.stream.is_done() {
+            Poll::NotReady
+        } else {
+            Poll::Ok(None)
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        let mut any_none = false;
+        // Primarily we're interested in all our pending futures, so schedule a
+        // callback on all of them.
+        for f in self.futures.iter_mut() {
+            match *f {
+                Some(ref mut f) => f.schedule(task),
+                None => any_none = true,
+            }
+        }
+
+        // If any slot was None, then we're also interested in the stream, but
+        // if all slots were taken we're not actually interested in the stream.
+        if any_none {
+            self.stream.schedule(task);
+        }
+    }
+}
+
diff --git a/futures-rs-test-all/src/stream/channel.rs b/futures-rs-test-all/src/stream/channel.rs
new file mode 100644
index 000000000..2d8f3fd46
--- /dev/null
+++ b/futures-rs-test-all/src/stream/channel.rs
@@ -0,0 +1,188 @@
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+use {Future, Task, Poll};
+use slot::{Slot, Token};
+use stream::Stream;
+
+/// Creates an in-memory channel implementation of the `Stream` trait.
+///
+/// This method creates a concrete implementation of the `Stream` trait which
+/// can be used to send values across threads in a streaming fashion. This
+/// channel is unique in that it implements back pressure to ensure that the
+/// sender never outpaces the receiver. The `Sender::send` method will only
+/// allow sending one message and the next message can only be sent once the
+/// first was consumed.
+///
+/// The `Receiver` returned implements the `Stream` trait and has access to any
+/// number of the associated combinators for transforming the result.
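+///
+/// # Example
+///
+/// A rough sketch of the intended flow (not a runnable doctest: the future
+/// returned by `send` and the `Receiver` stream still need to be driven by
+/// tasks):
+///
+/// ```ignore
+/// let (tx, rx) = channel::<i32, ()>();
+///
+/// // `send` consumes the sender and yields it back once the value has been
+/// // consumed on the other end, providing back pressure.
+/// let tx_again = tx.send(Ok(1));
+///
+/// // The receiver is a `Stream`, so the usual adaptors apply.
+/// let rx = rx.map(|n| n + 1);
+/// ```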
+pub fn channel<T, E>() -> (Sender<T, E>, Receiver<T, E>)
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    let inner = Arc::new(Inner {
+        slot: Slot::new(None),
+        receiver_gone: AtomicBool::new(false),
+    });
+    let sender = Sender {
+        inner: inner.clone(),
+    };
+    let receiver = Receiver {
+        inner: inner,
+        on_full_token: None,
+    };
+    (sender, receiver)
+}
+
+/// The transmission end of a channel which is used to send values.
+///
+/// This is created by the `channel` method in the `stream` module.
+pub struct Sender<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    inner: Arc<Inner<T, E>>,
+}
+
+/// A future returned by the `Sender::send` method which will resolve to the
+/// sender once it's available to send another message.
+pub struct FutureSender<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    sender: Option<Sender<T, E>>,
+    data: Option<Result<T, E>>,
+}
+
+/// The receiving end of a channel which implements the `Stream` trait.
+///
+/// This is a concrete implementation of a stream which can be used to represent
+/// a stream of values being computed elsewhere. This is created by the
+/// `channel` method in the `stream` module.
+pub struct Receiver<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    inner: Arc<Inner<T, E>>,
+    on_full_token: Option<Token>,
+}
+
+struct Inner<T, E> {
+    slot: Slot<Message<Result<T, E>>>,
+    receiver_gone: AtomicBool,
+}
+
+enum Message<T> {
+    Data(T),
+    Done,
+}
+
+pub struct SendError<T, E>(Result<T, E>);
+
+impl<T, E> Stream for Receiver<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self, _task: &mut Task) -> Poll<Option<T>, E> {
+        // TODO: disconnect?
+        match self.inner.slot.try_consume() {
+            Ok(Message::Data(Ok(e))) => Poll::Ok(Some(e)),
+            Ok(Message::Data(Err(e))) => Poll::Err(e),
+            Ok(Message::Done) => Poll::Ok(None),
+            Err(..) => Poll::NotReady,
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        if let Some(token) = self.on_full_token.take() {
+            self.inner.slot.cancel(token);
+        }
+
+        let handle = task.handle().clone();
+        self.on_full_token = Some(self.inner.slot.on_full(move |_| {
+            handle.notify();
+        }));
+    }
+}
+
+impl<T, E> Drop for Receiver<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    fn drop(&mut self) {
+        self.inner.receiver_gone.store(true, Ordering::SeqCst);
+        if let Some(token) = self.on_full_token.take() {
+            self.inner.slot.cancel(token);
+        }
+        self.inner.slot.on_full(|slot| {
+            drop(slot.try_consume());
+        });
+    }
+}
+
+impl<T, E> Sender<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    /// Sends a new value along this channel to the receiver.
+    ///
+    /// This method consumes the sender and returns a future which will resolve
+    /// to the sender again when the value sent has been consumed.
+    pub fn send(self, t: Result<T, E>) -> FutureSender<T, E> {
+        FutureSender {
+            sender: Some(self),
+            data: Some(t),
+        }
+    }
+}
+
+impl<T, E> Drop for Sender<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    fn drop(&mut self) {
+        self.inner.slot.on_empty(|slot| {
+            slot.try_produce(Message::Done).ok().unwrap();
+        });
+    }
+}
+
+impl<T, E> Future for FutureSender<T, E>
+    where T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = Sender<T, E>;
+    type Error = SendError<T, E>;
+
+    fn poll(&mut self, _task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        let data = self.data.take().expect("cannot poll FutureSender twice");
+        let sender = self.sender.take().expect("cannot poll FutureSender twice");
+        match sender.inner.slot.try_produce(Message::Data(data)) {
+            Ok(()) => return Poll::Ok(sender),
+            Err(e) => {
+                self.data = Some(match e.into_inner() {
+                    Message::Data(data) => data,
+                    Message::Done => panic!(),
+                });
+                self.sender = Some(sender);
+                Poll::NotReady
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.sender {
+            Some(ref s) => {
+                let handle = task.handle().clone();
+                // TODO: don't drop token?
+                s.inner.slot.on_empty(move |_slot| {
+                    handle.notify();
+                });
+            }
+            None => task.notify(),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/collect.rs b/futures-rs-test-all/src/stream/collect.rs
new file mode 100644
index 000000000..80277062e
--- /dev/null
+++ b/futures-rs-test-all/src/stream/collect.rs
@@ -0,0 +1,51 @@
+use std::mem;
+
+use {Task, Future, Poll};
+use stream::Stream;
+
+/// A future which collects all of the values of a stream into a vector.
+///
+/// This future is created by the `Stream::collect` method.
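+///
+/// A construction-only sketch using the `iter` constructor from this crate's
+/// `stream` module (the returned future must still be driven by a task):
+///
+/// ```ignore
+/// let future = iter(vec![1, 2, 3].into_iter().map(Ok::<i32, ()>)).collect();
+/// // Resolves to `Ok(vec![1, 2, 3])`.
+/// ```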
+pub struct Collect<S> where S: Stream {
+    stream: S,
+    items: Vec<S::Item>,
+}
+
+pub fn new<S>(s: S) -> Collect<S>
+    where S: Stream,
+{
+    Collect {
+        stream: s,
+        items: Vec::new(),
+    }
+}
+
+impl<S: Stream> Collect<S> {
+    fn finish(&mut self) -> Vec<S::Item> {
+        mem::replace(&mut self.items, Vec::new())
+    }
+}
+
+impl<S> Future for Collect<S>
+    where S: Stream,
+{
+    type Item = Vec<S::Item>;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Vec<S::Item>, S::Error> {
+        loop {
+            match try_poll!(self.stream.poll(task)) {
+                Ok(Some(e)) => self.items.push(e),
+                Ok(None) => return Poll::Ok(self.finish()),
+                Err(e) => {
+                    self.finish();
+                    return Poll::Err(e)
+                }
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/filter.rs b/futures-rs-test-all/src/stream/filter.rs
new file mode 100644
index 000000000..ba3f02247
--- /dev/null
+++ b/futures-rs-test-all/src/stream/filter.rs
@@ -0,0 +1,47 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A stream combinator used to filter the results of a stream and only yield
+/// some values.
+///
+/// This structure is produced by the `Stream::filter` method.
+pub struct Filter<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F>(s: S, f: F) -> Filter<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item) -> bool + Send + 'static,
+{
+    Filter {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F> Stream for Filter<S, F>
+    where S: Stream,
+          F: FnMut(&S::Item) -> bool + Send + 'static,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<S::Item>, S::Error> {
+        loop {
+            match try_poll!(self.stream.poll(task)) {
+                Ok(Some(e)) => {
+                    if (self.f)(&e) {
+                        return Poll::Ok(Some(e))
+                    }
+                }
+                Ok(None) => return Poll::Ok(None),
+                Err(e) => return Poll::Err(e),
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/filter_map.rs b/futures-rs-test-all/src/stream/filter_map.rs
new file mode 100644
index 000000000..1cc97ce8b
--- /dev/null
+++ b/futures-rs-test-all/src/stream/filter_map.rs
@@ -0,0 +1,48 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A combinator used to filter the results of a stream and simultaneously map
+/// them to a different type.
+///
+/// This structure is returned by the `Stream::filter_map` method.
+pub struct FilterMap<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F, B>(s: S, f: F) -> FilterMap<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> Option<B> + Send + 'static,
+{
+    FilterMap {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F, B> Stream for FilterMap<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> Option<B> + Send + 'static,
+          B: Send + 'static,
+{
+    type Item = B;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<B>, S::Error> {
+        loop {
+            match try_poll!(self.stream.poll(task)) {
+                Ok(Some(e)) => {
+                    if let Some(e) = (self.f)(e) {
+                        return Poll::Ok(Some(e))
+                    }
+                }
+                Ok(None) => return Poll::Ok(None),
+                Err(e) => return Poll::Err(e),
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/flatten.rs b/futures-rs-test-all/src/stream/flatten.rs
new file mode 100644
index 000000000..c92e86913
--- /dev/null
+++ b/futures-rs-test-all/src/stream/flatten.rs
@@ -0,0 +1,58 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A combinator used to flatten a stream-of-streams into one long stream of
+/// elements.
+///
+/// This combinator is created by the `Stream::flatten` method.
+pub struct Flatten<S>
+    where S: Stream,
+{
+    stream: S,
+    next: Option<S::Item>,
+}
+
+pub fn new<S>(s: S) -> Flatten<S>
+    where S: Stream,
+          S::Item: Stream,
+          <S::Item as Stream>::Error: From<S::Error>,
+{
+    Flatten {
+        stream: s,
+        next: None,
+    }
+}
+
+impl<S> Stream for Flatten<S>
+    where S: Stream,
+          S::Item: Stream,
+          <S::Item as Stream>::Error: From<S::Error>,
+{
+    type Item = <S::Item as Stream>::Item;
+    type Error = <S::Item as Stream>::Error;
+
+    fn poll(&mut self, task: &mut Task)
+            -> Poll<Option<Self::Item>, Self::Error> {
+        loop {
+            if self.next.is_none() {
+                match try_poll!(self.stream.poll(task)) {
+                    Ok(Some(e)) => self.next = Some(e),
+                    Ok(None) => return Poll::Ok(None),
+                    Err(e) => return Poll::Err(From::from(e)),
+                }
+            }
+            assert!(self.next.is_some());
+            match self.next.as_mut().unwrap().poll(task) {
+                Poll::Ok(None) => self.next = None,
+                other => return other,
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.next {
+            Some(ref mut s) => s.schedule(task),
+            None => self.stream.schedule(task),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/fold.rs b/futures-rs-test-all/src/stream/fold.rs
new file mode 100644
index 000000000..5a6d1f0e6
--- /dev/null
+++ b/futures-rs-test-all/src/stream/fold.rs
@@ -0,0 +1,89 @@
+use std::mem;
+
+use {Task, Future, Poll, IntoFuture};
+use stream::Stream;
+
+/// A future used to collect all the results of a stream into one generic type.
+///
+/// This future is returned by the `Stream::fold` method.
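+///
+/// A construction-only sketch (assuming a `Stream::fold(init, f)` signature
+/// mirroring `Iterator::fold`, and the crate-level `finished` constructor for
+/// an already-completed future; the returned future must still be driven by a
+/// task):
+///
+/// ```ignore
+/// let sum = iter(vec![1, 2, 3].into_iter().map(Ok::<i32, ()>))
+///     .fold(0, |acc, x| finished::<i32, ()>(acc + x));
+/// // Resolves to `Ok(6)`.
+/// ```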
+pub struct Fold<S, F, Fut, T> where Fut: IntoFuture {
+    stream: S,
+    f: F,
+    state: State<T, Fut::Future>,
+}
+
+enum State<T, Fut> {
+    /// Placeholder state when doing work
+    Empty,
+
+    /// Ready to process the next stream item; current accumulator is the `T`
+    Ready(T),
+
+    /// Working on a future to process the previous stream item
+    Processing(Fut),
+}
+
+pub fn new<S, F, Fut, T>(s: S, f: F, t: T) -> Fold<S, F, Fut, T>
+    where S: Stream,
+          F: FnMut(T, S::Item) -> Fut + Send + 'static,
+          Fut: IntoFuture<Item = T>,
+          Fut::Error: Into<S::Error>,
+          T: Send + 'static
+{
+    Fold {
+        stream: s,
+        f: f,
+        state: State::Ready(t),
+    }
+}
+
+impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
+    where S: Stream,
+          F: FnMut(T, S::Item) -> Fut + Send + 'static,
+          Fut: IntoFuture<Item = T>,
+          Fut::Error: Into<S::Error>,
+          T: Send + 'static
+{
+    type Item = T;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<T, S::Error> {
+        loop {
+            match mem::replace(&mut self.state, State::Empty) {
+                State::Empty => panic!("cannot poll Fold twice"),
+                State::Ready(state) => {
+                    match self.stream.poll(task) {
+                        Poll::Ok(Some(e)) => {
+                            let future = (self.f)(state, e);
+                            self.state = State::Processing(future.into_future());
+                        }
+                        Poll::Ok(None) => return Poll::Ok(state),
+                        Poll::Err(e) => return Poll::Err(e),
+                        Poll::NotReady => {
+                            self.state = State::Ready(state);
+                            return Poll::NotReady
+                        }
+                    }
+                }
+                State::Processing(mut fut) => {
+                    match fut.poll(task) {
+                        Poll::Ok(state) => self.state = State::Ready(state),
+                        Poll::Err(e) => return Poll::Err(e.into()),
+                        Poll::NotReady => {
+                            self.state = State::Processing(fut);
+                            return Poll::NotReady;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.state {
+            State::Empty => panic!("cannot `schedule` a completed Fold"),
+            State::Ready(_) => self.stream.schedule(task),
+            State::Processing(ref mut fut) => fut.schedule(task),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/for_each.rs b/futures-rs-test-all/src/stream/for_each.rs
new file mode 100644
index 000000000..b073bb498
--- /dev/null
+++ b/futures-rs-test-all/src/stream/for_each.rs
@@ -0,0 +1,48 @@
+use {Future, Task, Poll};
+use stream::Stream;
+
+/// A stream combinator which executes a unit closure over each item on a
+/// stream.
+///
+/// This structure is returned by the `Stream::for_each` method.
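+///
+/// A construction-only sketch (the returned future must still be driven by a
+/// task):
+///
+/// ```ignore
+/// let future = iter(vec![1, 2, 3].into_iter().map(Ok::<i32, ()>))
+///     .for_each(|item| {
+///         println!("got {}", item);
+///         Ok(())
+///     });
+/// ```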
+pub struct ForEach<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F>(s: S, f: F) -> ForEach<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static
+{
+    ForEach {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F> Future for ForEach<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static
+{
+    type Item = ();
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<(), S::Error> {
+        loop {
+            match try_poll!(self.stream.poll(task)) {
+                Ok(Some(e)) => {
+                    match (self.f)(e) {
+                        Ok(()) => {}
+                        Err(e) => return Poll::Err(e),
+                    }
+                }
+                Ok(None) => return Poll::Ok(()),
+                Err(e) => return Poll::Err(e),
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/fuse.rs b/futures-rs-test-all/src/stream/fuse.rs
new file mode 100644
index 000000000..7e358463b
--- /dev/null
+++ b/futures-rs-test-all/src/stream/fuse.rs
@@ -0,0 +1,47 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A stream which "fuse"s a stream once it's terminated.
+///
+/// Normally streams can behave unpredictably once they've terminated or
+/// returned an error, but `Fuse` is guaranteed to return `Poll::NotReady`
+/// from `poll` after the underlying stream has terminated, and afterwards all
+/// calls to `schedule` will be ignored.
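+///
+/// A construction-only sketch via the `Stream::fuse` adaptor (`some_stream` is
+/// a placeholder for any `Stream`):
+///
+/// ```ignore
+/// let mut fused = some_stream.fuse();
+/// // Once `poll` has yielded `Ok(None)`, `fused.is_done()` returns `true` and
+/// // subsequent polls return `Poll::NotReady`.
+/// ```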
+pub struct Fuse<S> {
+    stream: Option<S>,
+}
+
+pub fn new<S: Stream>(s: S) -> Fuse<S> {
+    Fuse { stream: Some(s) }
+}
+
+impl<S: Stream> Stream for Fuse<S> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<S::Item>, S::Error> {
+        let ret = self.stream.as_mut().map(|s| s.poll(task));
+        match ret {
+            Some(Poll::Ok(None)) => self.stream = None,
+            _ => {}
+        }
+        ret.unwrap_or(Poll::NotReady)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        if let Some(ref mut stream) = self.stream {
+            stream.schedule(task)
+        }
+    }
+}
+
+impl<S> Fuse<S> {
+    /// Returns whether the underlying stream has finished or not.
+    ///
+    /// If this method returns `true`, then all future calls to poll are
+    /// guaranteed to return `NotReady`. If this returns `false`, then the
+    /// underlying stream is still in use.
+    pub fn is_done(&self) -> bool {
+        self.stream.is_none()
+    }
+}
diff --git a/futures-rs-test-all/src/stream/future.rs b/futures-rs-test-all/src/stream/future.rs
new file mode 100644
index 000000000..974c8157f
--- /dev/null
+++ b/futures-rs-test-all/src/stream/future.rs
@@ -0,0 +1,37 @@
+use {Task, Future, Poll};
+use stream::Stream;
+
+/// A combinator used to temporarily convert a stream into a future.
+///
+/// This future is returned by the `Stream::into_future` method.
+pub struct StreamFuture<S> {
+    stream: Option<S>,
+}
+
+pub fn new<S: Stream>(s: S) -> StreamFuture<S> {
+    StreamFuture { stream: Some(s) }
+}
+
+impl<S: Stream> Future for StreamFuture<S> {
+    type Item = (Option<S::Item>, S);
+    type Error = (S::Error, S);
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Self::Item, Self::Error> {
+        let item = {
+            let s = self.stream.as_mut().expect("polling StreamFuture twice");
+            try_poll!(s.poll(task))
+        };
+        let stream = self.stream.take().unwrap();
+
+        match item {
+            Ok(e) => Poll::Ok((e, stream)),
+            Err(e) => Poll::Err((e, stream)),
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        if let Some(s) = self.stream.as_mut() {
+            s.schedule(task)
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/impls.rs b/futures-rs-test-all/src/stream/impls.rs
new file mode 100644
index 000000000..bac20dabe
--- /dev/null
+++ b/futures-rs-test-all/src/stream/impls.rs
@@ -0,0 +1,15 @@
+use {Task, Poll};
+use stream::Stream;
+
+impl<S: ?Sized + Stream> Stream for Box<S> {
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<Self::Item>, Self::Error> {
+        (**self).poll(task)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        (**self).schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/iter.rs b/futures-rs-test-all/src/stream/iter.rs
new file mode 100644
index 000000000..ceb382d19
--- /dev/null
+++ b/futures-rs-test-all/src/stream/iter.rs
@@ -0,0 +1,49 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A stream which is just a shim over an underlying instance of `Iterator`.
+///
+/// This stream will never block and is always ready.
+pub struct IterStream<I> {
+    iter: I,
+}
+
+/// Converts an `Iterator` into a `Stream` which is always ready to yield the
+/// next value.
+///
+/// Iterators in Rust don't express the ability to block, so this adapter
+/// simply always calls `iter.next()` and returns that. The iterator yields
+/// `Result` values: `Ok` items are produced as elements of the stream, while
+/// any `Err` is surfaced as the stream's error.
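+///
+/// # Example
+///
+/// ```ignore
+/// // Yields `1` and `2` as items, then reports the error `"boom"`.
+/// let stream = iter(vec![Ok(1), Ok(2), Err("boom")].into_iter());
+/// ```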
+pub fn iter<I, T, E>(i: I) -> IterStream<I>
+    where I: Iterator<Item=Result<T, E>>,
+          I: Send + 'static,
+          T: Send + 'static,
+          E: Send + 'static,
+{
+    IterStream {
+        iter: i,
+    }
+}
+
+impl<I, T, E> Stream for IterStream<I>
+    where I: Iterator<Item=Result<T, E>>,
+          I: Send + 'static,
+          T: Send + 'static,
+          E: Send + 'static,
+{
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self, _task: &mut Task) -> Poll<Option<T>, E> {
+        match self.iter.next() {
+            Some(Ok(e)) => Poll::Ok(Some(e)),
+            Some(Err(e)) => Poll::Err(e),
+            None => Poll::Ok(None),
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        task.notify()
+    }
+}
diff --git a/futures-rs-test-all/src/stream/map.rs b/futures-rs-test-all/src/stream/map.rs
new file mode 100644
index 000000000..9fcf6d282
--- /dev/null
+++ b/futures-rs-test-all/src/stream/map.rs
@@ -0,0 +1,39 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A stream combinator which will change the type of a stream from one
+/// type to another.
+///
+/// This is produced by the `Stream::map` method.
+pub struct Map<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> Map<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> U + Send + 'static,
+          U: Send + 'static,
+{
+    Map {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F, U> Stream for Map<S, F>
+    where S: Stream,
+          F: FnMut(S::Item) -> U + Send + 'static,
+          U: Send + 'static,
+{
+    type Item = U;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<U>, S::Error> {
+        self.stream.poll(task).map(|option| option.map(&mut self.f))
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/map_err.rs b/futures-rs-test-all/src/stream/map_err.rs
new file mode 100644
index 000000000..cf29ab66f
--- /dev/null
+++ b/futures-rs-test-all/src/stream/map_err.rs
@@ -0,0 +1,39 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A stream combinator which will change the error type of a stream from one
+/// type to another.
+///
+/// This is produced by the `Stream::map_err` method.
+pub struct MapErr<S, F> {
+    stream: S,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> MapErr<S, F>
+    where S: Stream,
+          F: FnMut(S::Error) -> U + Send + 'static,
+          U: Send + 'static,
+{
+    MapErr {
+        stream: s,
+        f: f,
+    }
+}
+
+impl<S, F, U> Stream for MapErr<S, F>
+    where S: Stream,
+          F: FnMut(S::Error) -> U + Send + 'static,
+          U: Send + 'static,
+{
+    type Item = S::Item;
+    type Error = U;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<S::Item>, U> {
+        self.stream.poll(task).map_err(&mut self.f)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/merge.rs b/futures-rs-test-all/src/stream/merge.rs
new file mode 100644
index 000000000..5b65edd8d
--- /dev/null
+++ b/futures-rs-test-all/src/stream/merge.rs
@@ -0,0 +1,77 @@
+use {Task, Poll};
+use stream::{Stream, Fuse};
+
+/// An adapter for merging the output of two streams.
+///
+/// The merged stream produces items from one or both of the underlying
+/// streams as they become available. Errors, however, are not merged: you
+/// get at most one error at a time.
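+///
+/// A construction-only sketch (assuming a corresponding `Stream::merge`
+/// adaptor; both streams must share the same error type):
+///
+/// ```ignore
+/// let merged = stream_a.merge(stream_b);
+/// // Items arrive as `MergedItem::First`, `MergedItem::Second`, or
+/// // `MergedItem::Both`.
+/// ```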
+pub struct Merge<S1, S2: Stream> {
+    stream1: Fuse<S1>,
+    stream2: Fuse<S2>,
+    queued_error: Option<S2::Error>,
+}
+
+pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Merge<S1, S2>
+    where S1: Stream, S2: Stream<Error = S1::Error>
+{
+    Merge {
+        stream1: stream1.fuse(),
+        stream2: stream2.fuse(),
+        queued_error: None,
+    }
+}
+
+/// An item returned from a merge stream, which represents an item from one or
+/// both of the underlying streams.
+pub enum MergedItem<I1, I2> {
+    /// An item from the first stream
+    First(I1),
+    /// An item from the second stream
+    Second(I2),
+    /// Items from both streams
+    Both(I1, I2),
+}
+
+impl<S1, S2> Stream for Merge<S1, S2>
+    where S1: Stream, S2: Stream<Error = S1::Error>
+{
+    type Item = MergedItem<S1::Item, S2::Item>;
+    type Error = S1::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<Self::Item>, Self::Error> {
+        if let Some(e) = self.queued_error.take() {
+            return Poll::Err(e);
+        }
+
+        match self.stream1.poll(task) {
+            Poll::Err(e) => Poll::Err(e),
+            Poll::NotReady => match self.stream2.poll(task) {
+                Poll::Err(e) => Poll::Err(e),
+                Poll::NotReady => Poll::NotReady,
+                Poll::Ok(Some(item2)) => Poll::Ok(Some(MergedItem::Second(item2))),
+                Poll::Ok(None) => Poll::NotReady,
+            },
+            Poll::Ok(Some(item1)) => match self.stream2.poll(task) {
+                Poll::Err(e) => {
+                    self.queued_error = Some(e);
+                    Poll::Ok(Some(MergedItem::First(item1)))
+                }
+                Poll::NotReady => Poll::Ok(Some(MergedItem::First(item1))),
+                Poll::Ok(Some(item2)) => Poll::Ok(Some(MergedItem::Both(item1, item2))),
+                Poll::Ok(None) => Poll::Ok(Some(MergedItem::First(item1))),
+            },
+            Poll::Ok(None) => match self.stream2.poll(task) {
+                Poll::Err(e) =>  Poll::Err(e),
+                Poll::NotReady => Poll::NotReady,
+                Poll::Ok(Some(item2)) => Poll::Ok(Some(MergedItem::Second(item2))),
+                Poll::Ok(None) => Poll::Ok(None),
+            },
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream1.schedule(task);
+        self.stream2.schedule(task);
+    }
+}
diff --git a/futures-rs-test-all/src/stream/mod.rs b/futures-rs-test-all/src/stream/mod.rs
new file mode 100755
index 000000000..eaa8b1412
--- /dev/null
+++ b/futures-rs-test-all/src/stream/mod.rs
@@ -0,0 +1,645 @@
+//! Asynchronous streams
+//!
+//! This module contains the `Stream` trait and a number of adaptors for this
+//! trait. This trait is very similar to the `Iterator` trait in the standard
+//! library except that it expresses the concept of blocking as well. A stream
+//! here is a sequence of values where some amount of time may pass between
+//! one value being produced and the next.
+//!
+//! A stream may signal that it is blocked between values while the next one
+//! is being calculated, and it also provides a way to get notified once that
+//! value is ready.
+// TODO: expand these docs
+
+use {Task, IntoFuture, Poll};
+
+mod channel;
+mod iter;
+pub use self::channel::{channel, Sender, Receiver};
+pub use self::iter::{iter, IterStream};
+
+mod and_then;
+mod buffered;
+mod collect;
+mod filter;
+mod filter_map;
+mod flatten;
+mod fold;
+mod for_each;
+mod fuse;
+mod future;
+mod map;
+mod map_err;
+mod merge;
+mod or_else;
+mod skip;
+mod skip_while;
+mod take;
+mod then;
+pub use self::and_then::AndThen;
+pub use self::buffered::Buffered;
+pub use self::collect::Collect;
+pub use self::filter::Filter;
+pub use self::filter_map::FilterMap;
+pub use self::flatten::Flatten;
+pub use self::fold::Fold;
+pub use self::for_each::ForEach;
+pub use self::fuse::Fuse;
+pub use self::future::StreamFuture;
+pub use self::map::Map;
+pub use self::map_err::MapErr;
+pub use self::merge::{Merge, MergedItem};
+pub use self::or_else::OrElse;
+pub use self::skip::Skip;
+pub use self::skip_while::SkipWhile;
+pub use self::take::Take;
+pub use self::then::Then;
+
+mod impls;
+
+/// A stream of values, not all of which have been produced yet.
+///
+/// `Stream` is a trait to represent any source of sequential events or items
+/// which acts like an iterator but may block over time. Like `Future` the
+/// methods of `Stream` never block and it is thus suitable for programming in
+/// an asynchronous fashion. This trait is very similar to the `Iterator` trait
+/// in the standard library where `Some` is used to signal elements of the
+/// stream and `None` is used to indicate that the stream is finished.
+///
+/// Like futures a stream has basic combinators to transform the stream, perform
+/// more work on each item, etc.
+///
+/// # Basic methods
+///
+/// Like futures, a `Stream` has two core methods which drive processing of data
+/// and notifications of when new data might be ready. The `poll` method checks
+/// the status of a stream and the `schedule` method is used to receive
+/// notifications for when it may be ready to call `poll` again.
+///
+/// Also like futures, a stream has an associated error type to represent that an
+/// element of the computation failed for some reason. Errors, however, do not
+/// signal the end of the stream.
+// TODO: is that last clause correct?
+///
+/// # Streams as Futures
+///
+/// Any instance of `Stream` can also be viewed as a `Future` where the resolved
+/// value is the next item in the stream along with the rest of the stream. The
+/// `into_future` adaptor can be used here to convert any stream into a future
+/// for use with other future methods like `join` and `select`.
+// TODO: more here
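+///
+/// # Examples
+///
+/// A minimal sketch of a hand-written stream (the `Countdown` type below is
+/// hypothetical and exists only for illustration) which yields a fixed number
+/// of values and is always immediately ready:
+///
+/// ```
+/// use futures::{Task, Poll};
+/// use futures::stream::Stream;
+///
+/// // Hypothetical example type, not part of this crate: counts down to zero.
+/// struct Countdown {
+///     remaining: u32,
+/// }
+///
+/// impl Stream for Countdown {
+///     type Item = u32;
+///     type Error = ();
+///
+///     fn poll(&mut self, _task: &mut Task) -> Poll<Option<u32>, ()> {
+///         if self.remaining == 0 {
+///             // The stream has terminated.
+///             Poll::Ok(None)
+///         } else {
+///             self.remaining -= 1;
+///             Poll::Ok(Some(self.remaining))
+///         }
+///     }
+///
+///     fn schedule(&mut self, task: &mut Task) {
+///         // A value is always immediately available, so request another poll.
+///         task.notify();
+///     }
+/// }
+///
+/// let mut stream = Countdown { remaining: 2 };
+/// assert_eq!(stream.poll(&mut Task::new()), Poll::Ok(Some(1)));
+/// assert_eq!(stream.poll(&mut Task::new()), Poll::Ok(Some(0)));
+/// assert_eq!(stream.poll(&mut Task::new()), Poll::Ok(None));
+/// ```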
+pub trait Stream: Send + 'static {
+    /// The type of item this stream will yield on success.
+    type Item: Send + 'static;
+
+    /// The type of error this stream may generate.
+    type Error: Send + 'static;
+
+    /// Attempt to pull out the next value of this stream, returning `None` if
+    /// it's not ready yet.
+    ///
+    /// This method, like `Future::poll`, is the sole method of pulling out a
+    /// value from a stream. The `task` argument is the task of computation that
+    /// this stream is running within, and it contains information like
+    /// task-local data and tokens of interest.
+    ///
+    /// Implementors of this trait must ensure that implementations of this
+    /// method do not block, as it may cause consumers to behave badly.
+    ///
+    /// # Return value
+    ///
+    /// If `Poll::NotReady` is returned then this stream's next value is not
+    /// ready yet, and `schedule` can be used to receive a notification for when
+    /// the value may become ready in the future. If `Poll::Ok(Some(item))` is
+    /// returned then `item` is the next value on the stream, `Poll::Ok(None)`
+    /// indicates that the stream has terminated, and `Poll::Err` indicates that
+    /// an error happened.
+    ///
+    /// # Panics
+    ///
+    /// Once a stream is finished, that is `Poll::Ok(None)` has been returned,
+    /// further calls to `poll` may result in a panic or other "bad behavior".
+    /// If this is difficult to guard against then the `fuse` adapter can be
+    /// used to ensure that `poll` always has well-defined semantics.
+    // TODO: more here
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<Self::Item>, Self::Error>;
+
+    // TODO: should there also be a method like `poll` but doesn't return an
+    //       item? basically just says "please make more progress internally"
+    //       seems crucial for buffering to actually make any sense.
+
+    /// Schedule a task to be notified when this future is ready.
+    ///
+    /// This is very similar to the `Future::schedule` method which registers
+    /// interest. The task provided will only be notified once for the next
+    /// value on a stream. If an application is interested in more values on a
+    /// stream, then a task needs to be re-scheduled.
+    ///
+    /// Multiple calls to `schedule` while waiting for one value to be produced
+    /// will only result in the final `task` getting notified. Consumers should
+    /// take care: if `schedule` is called twice, the previously scheduled task
+    /// is not guaranteed to be invoked.
+    ///
+    /// Implementors of the `Stream` trait are recommended to just blindly pass
+    /// around this task rather than manufacture new tasks for contained
+    /// futures.
+    ///
+    /// When the task is notified it will be provided a set of tokens that
+    /// represent the set of events which have happened since it was last called
+    /// (or the last call to `poll`). These events can later be read during the
+    /// `poll` phase to prevent polling too much.
+    ///
+    /// # Panics
+    ///
+    /// Once a stream has returned `Poll::Ok(None)` (it has completed) then
+    /// further calls to either `poll` or this function, `schedule`, should not
+    /// be expected to behave well. A call to `schedule` after a stream has
+    /// finished may panic, block forever, or otherwise exhibit odd behavior.
+    ///
+    /// Callers who may call `schedule` after a stream is finished may want to
+    /// consider using the `fuse` adaptor which defines the behavior of
+    /// `schedule` after a successful poll, but comes with a little bit of
+    /// extra cost.
+    fn schedule(&mut self, task: &mut Task);
+
+    /// Convenience function for turning this stream into a trait object.
+    ///
+    /// This simply avoids the need to write `Box::new` and can often help with
+    /// type inference as well by always returning a trait object.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel();
+    /// let a: Box<Stream<Item=i32, Error=i32>> = rx.boxed();
+    /// ```
+    fn boxed(self) -> Box<Stream<Item = Self::Item, Error = Self::Error>>
+        where Self: Sized
+    {
+        Box::new(self)
+    }
+
+    /// Converts this stream into a `Future`.
+    ///
+    /// A stream can be viewed as simply a future which will resolve to the next
+    /// element of the stream as well as the stream itself. The returned future
+    /// can be used to compose streams and futures together by placing
+    /// everything into the "world of futures".
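+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only), mirroring the `channel`
+    /// examples above:
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// // The returned future resolves to the next item paired with the rest
+    /// // of the stream.
+    /// let future = rx.into_future();
+    /// ```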
+    fn into_future(self) -> StreamFuture<Self>
+        where Self: Sized
+    {
+        future::new(self)
+    }
+
+    /// Converts a stream of type `T` to a stream of type `U`.
+    ///
+    /// The provided closure is executed over all elements of this stream as
+    /// they are made available, and the callback will be executed inline with
+    /// calls to `poll`.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it, similar to the existing `map` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// let rx = rx.map(|x| x + 3);
+    /// ```
+    fn map<U, F>(self, f: F) -> Map<Self, F>
+        where F: FnMut(Self::Item) -> U + Send + 'static,
+              U: Send + 'static,
+              Self: Sized
+    {
+        map::new(self, f)
+    }
+
+    /// Converts a stream of error type `T` to a stream of error type `U`.
+    ///
+    /// The provided closure is executed over all errors of this stream as
+    /// they are made available, and the callback will be executed inline with
+    /// calls to `poll`.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it, similar to the existing `map_err` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// let rx = rx.map_err(|x| x + 3);
+    /// ```
+    fn map_err<U, F>(self, f: F) -> MapErr<Self, F>
+        where F: FnMut(Self::Error) -> U + Send + 'static,
+              U: Send + 'static,
+              Self: Sized
+    {
+        map_err::new(self, f)
+    }
+
+    /// Filters the values produced by this stream according to the provided
+    /// predicate.
+    ///
+    /// As values of this stream are made available, the provided predicate will
+    /// be run against them. If the predicate returns `true` then the stream
+    /// will yield the value, but if the predicate returns `false` then the
+    /// value will be discarded and the next value will be produced.
+    ///
+    /// All errors are passed through without filtering in this combinator.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it, similar to the existing `filter` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// let evens = rx.filter(|x| x % 2 == 0);
+    /// ```
+    fn filter<F>(self, f: F) -> Filter<Self, F>
+        where F: FnMut(&Self::Item) -> bool + Send + 'static,
+              Self: Sized
+    {
+        filter::new(self, f)
+    }
+
+    /// Filters the values produced by this stream while simultaneously mapping
+    /// them to a different type.
+    ///
+    /// As values of this stream are made available, the provided function will
+    /// be run on them. If the predicate returns `Some(e)` then the stream will
+    /// yield the value `e`, but if the predicate returns `None` then the next
+    /// value will be produced.
+    ///
+    /// All errors are passed through without filtering in this combinator.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it, similar to the existing `filter_map` methods in the
+    /// standard library.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// let evens_plus_one = rx.filter_map(|x| {
+    ///     if x % 2 == 0 {
+    ///         Some(x + 1)
+    ///     } else {
+    ///         None
+    ///     }
+    /// });
+    /// ```
+    fn filter_map<F, B>(self, f: F) -> FilterMap<Self, F>
+        where F: FnMut(Self::Item) -> Option<B> + Send + 'static,
+              Self: Sized
+    {
+        filter_map::new(self, f)
+    }
+
+    /// Chain on a computation for when a value is ready, passing the resulting
+    /// item to the provided closure `f`.
+    ///
+    /// This function can be used to ensure a computation runs regardless of
+    /// the next value on the stream. The closure provided will be yielded a
+    /// `Result` once a value is ready, and the returned future will then be run
+    /// to completion to produce the next value on this stream.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed stream
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    ///
+    /// let rx = rx.then(|result| {
+    ///     match result {
+    ///         Ok(e) => Ok(e + 3),
+    ///         Err(e) => Err(e - 4),
+    ///     }
+    /// });
+    /// ```
+    fn then<F, U>(self, f: F) -> Then<Self, F, U>
+        where F: FnMut(Result<Self::Item, Self::Error>) -> U + Send + 'static,
+              U: IntoFuture,
+              Self: Sized
+    {
+        then::new(self, f)
+    }
+
+    /// Chain on a computation for when a value is ready, passing the successful
+    /// results to the provided closure `f`.
+    ///
+    /// This function can be used to run a unit of work when the next successful
+    /// value on a stream is ready. The closure provided will be yielded a value
+    /// when ready, and the returned future will then be run to completion to
+    /// produce the next value on this stream.
+    ///
+    /// Any errors produced by this stream will not be passed to the closure,
+    /// and will be passed through.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed stream
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    ///
+    /// let rx = rx.and_then(|result| {
+    ///     if result % 2 == 0 {
+    ///         Ok(result)
+    ///     } else {
+    ///         Err(result as u32)
+    ///     }
+    /// });
+    /// ```
+    fn and_then<F, U>(self, f: F) -> AndThen<Self, F, U>
+        where F: FnMut(Self::Item) -> U + Send + 'static,
+              U: IntoFuture<Error = Self::Error>,
+              Self: Sized
+    {
+        and_then::new(self, f)
+    }
+
+    /// Chain on a computation for when an error happens, passing the
+    /// erroneous result to the provided closure `f`.
+    ///
+    /// This function can be used to run a unit of work and attempt to recover from
+    /// an error if one happens. The closure provided will be yielded an error
+    /// when one appears, and the returned future will then be run to completion
+    /// to produce the next value on this stream.
+    ///
+    /// Any successful values produced by this stream will not be passed to the
+    /// closure, and will be passed through.
+    ///
+    /// The returned value of the closure must implement the `IntoFuture` trait
+    /// and can represent some more work to be done before the composed stream
+    /// is finished. Note that the `Result` type implements the `IntoFuture`
+    /// trait so it is possible to simply alter the `Result` yielded to the
+    /// closure and return it.
+    ///
+    /// Note that this function consumes the receiving future and returns a
+    /// wrapped version of it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    ///
+    /// let rx = rx.or_else(|result| {
+    ///     if result % 2 == 0 {
+    ///         Ok(result as i32)
+    ///     } else {
+    ///         Err(result)
+    ///     }
+    /// });
+    /// ```
+    fn or_else<F, U>(self, f: F) -> OrElse<Self, F, U>
+        where F: FnMut(Self::Error) -> U + Send + 'static,
+              U: IntoFuture<Item = Self::Item>,
+              Self: Sized
+    {
+        or_else::new(self, f)
+    }
+
+    /// Collect all of the values of this stream into a vector, returning a
+    /// future representing the result of that computation.
+    ///
+    /// This combinator will collect all successful results of this stream and
+    /// collect them into a `Vec<Self::Item>`. If an error happens then all
+    /// collected elements will be dropped and the error will be returned.
+    ///
+    /// The returned future will be resolved whenever an error happens or when
+    /// the stream returns `Ok(None)`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::{finished, Future, Task, Poll};
+    /// use futures::stream::*;
+    ///
+    /// let (tx, rx) = channel::<i32, u32>();
+    ///
+    /// fn send(n: i32, tx: Sender<i32, u32>)
+    ///         -> Box<Future<Item=(), Error=()>> {
+    ///     if n == 0 {
+    ///         return finished(()).boxed()
+    ///     }
+    ///     tx.send(Ok(n)).map_err(|_| ()).and_then(move |tx| {
+    ///         send(n - 1, tx)
+    ///     }).boxed()
+    /// }
+    ///
+    /// send(5, tx).forget();
+    ///
+    /// let mut result = rx.collect();
+    /// assert_eq!(result.poll(&mut Task::new()),
+    ///            Poll::Ok(vec![5, 4, 3, 2, 1]));
+    /// ```
+    fn collect(self) -> Collect<Self>
+        where Self: Sized
+    {
+        collect::new(self)
+    }
+
+    /// Execute an accumulating computation over a stream, collecting all the
+    /// values into one final result.
+    ///
+    /// This combinator will collect all successful results of this stream
+    /// according to the closure provided. The initial state is also provided to
+    /// this method and then is returned again by each execution of the closure.
+    /// Once the entire stream has been exhausted the returned future will
+    /// resolve to this value.
+    ///
+    /// If an error happens then collected state will be dropped and the error
+    /// will be returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use futures::{finished, Future, Task, Poll};
+    /// use futures::stream::*;
+    ///
+    /// let (tx, rx) = channel::<i32, u32>();
+    ///
+    /// fn send(n: i32, tx: Sender<i32, u32>)
+    ///         -> Box<Future<Item=(), Error=()>> {
+    ///     if n == 0 {
+    ///         return finished(()).boxed()
+    ///     }
+    ///     tx.send(Ok(n)).map_err(|_| ()).and_then(move |tx| {
+    ///         send(n - 1, tx)
+    ///     }).boxed()
+    /// }
+    ///
+    /// send(5, tx).forget();
+    ///
+    /// let mut result = rx.fold(0, |a, b| finished::<i32, u32>(a + b));
+    /// assert_eq!(result.poll(&mut Task::new()), Poll::Ok(15));
+    /// ```
+    fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T>
+        where F: FnMut(T, Self::Item) -> Fut + Send + 'static,
+              Fut: IntoFuture<Item = T>,
+              Fut::Error: Into<Self::Error>,
+              T: Send + 'static,
+              Self: Sized
+    {
+        fold::new(self, f, init)
+    }
+
+    /// Flattens a stream of streams into just one continuous stream.
+    ///
+    /// If this stream's elements are themselves streams then this combinator
+    /// will flatten out the entire stream to one long chain of elements. Any
+    /// errors are passed through without looking at them, but otherwise each
+    /// individual stream will get exhausted before moving on to the next.
+    ///
+    /// ```
+    /// use futures::{finished, Future, Task, Poll};
+    /// use futures::stream::*;
+    ///
+    /// let (tx1, rx1) = channel::<i32, u32>();
+    /// let (tx2, rx2) = channel::<i32, u32>();
+    /// let (tx3, rx3) = channel::<_, u32>();
+    ///
+    /// tx1.send(Ok(1)).and_then(|tx1| tx1.send(Ok(2))).forget();
+    /// tx2.send(Ok(3)).and_then(|tx2| tx2.send(Ok(4))).forget();
+    ///
+    /// tx3.send(Ok(rx1)).and_then(|tx3| tx3.send(Ok(rx2))).forget();
+    ///
+    /// let mut result = rx3.flatten().collect();
+    /// assert_eq!(result.poll(&mut Task::new()), Poll::Ok(vec![1, 2, 3, 4]));
+    /// ```
+    fn flatten(self) -> Flatten<Self>
+        where Self::Item: Stream,
+              <Self::Item as Stream>::Error: From<Self::Error>,
+              Self: Sized
+    {
+        flatten::new(self)
+    }
+
+    /// Skip elements on this stream while the predicate provided resolves to
+    /// `true`.
+    ///
+    /// This function, like `Iterator::skip_while`, will skip elements on the
+    /// stream until the `predicate` resolves to `false`. Once the predicate
+    /// resolves to `false` for an element, that element and all subsequent
+    /// elements will be returned from the underlying stream.
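+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only); the predicate returns a
+    /// `Result`, which implements `IntoFuture`:
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// // Skips leading items that are less than 10.
+    /// let rx = rx.skip_while(|x| Ok(*x < 10));
+    /// ```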
+    fn skip_while<P, R>(self, pred: P) -> SkipWhile<Self, P, R>
+        where P: FnMut(&Self::Item) -> R + Send + 'static,
+              R: IntoFuture<Item=bool, Error=Self::Error>,
+              Self: Sized
+    {
+        skip_while::new(self, pred)
+    }
+
+    // TODO: should this closure return a result?
+    #[allow(missing_docs)]
+    fn for_each<F>(self, f: F) -> ForEach<Self, F>
+        where F: FnMut(Self::Item) -> Result<(), Self::Error> + Send + 'static,
+              Self: Sized
+    {
+        for_each::new(self, f)
+    }
+
+    /// Creates a new stream of at most `amt` items.
+    ///
+    /// Once `amt` items have been yielded from this stream then it will always
+    /// return that the stream is done.
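+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only):
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// // Yields at most the first ten items of `rx`.
+    /// let first_ten = rx.take(10);
+    /// ```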
+    fn take(self, amt: u64) -> Take<Self>
+        where Self: Sized
+    {
+        take::new(self, amt)
+    }
+
+    /// Creates a new stream which skips `amt` items of the underlying stream.
+    ///
+    /// Once `amt` items have been skipped from this stream then it will always
+    /// return the remaining items on this stream.
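+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only):
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// // Discards the first three items and then yields the rest.
+    /// let rest = rx.skip(3);
+    /// ```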
+    fn skip(self, amt: u64) -> Skip<Self>
+        where Self: Sized
+    {
+        skip::new(self, amt)
+    }
+
+    /// Fuse a stream such that `poll`/`schedule` will never again be called
+    /// once it has terminated (signaled emptiness or an error).
+    ///
+    /// Currently once a stream has returned `Poll::Ok(None)` from `poll` any
+    /// further calls could exhibit bad behavior such as blocking forever,
+    /// panicking, never returning, etc. If it is known that `poll` may be
+    /// called too often then this method can be used to ensure that it has
+    /// defined semantics.
+    ///
+    /// Once a stream has been `fuse`d and it terminates, it will forever return
+    /// `Poll::Ok(None)` from `poll`. This, unlike the behavior of the trait's
+    /// `poll` method, is guaranteed.
+    ///
+    /// Additionally, once a stream has completed, this `Fuse` combinator will
+    /// never call `schedule` on the underlying stream.
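+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only):
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<i32, u32>();
+    /// // After `rx` terminates, `fused` keeps reporting that it is done
+    /// // instead of exhibiting ill-defined behavior on extra polls.
+    /// let fused = rx.fuse();
+    /// ```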
+    fn fuse(self) -> Fuse<Self>
+        where Self: Sized
+    {
+        fuse::new(self)
+    }
+
+    /// An adaptor for creating a buffered list of pending futures.
+    ///
+    /// If this stream's item can be converted into a future, then this adaptor
+    /// will buffer up to `amt` futures and then return results in the order
+    /// that the futures are completed. No more than `amt` futures will be
+    /// buffered at any point in time, and fewer than `amt` may also be buffered
+    /// depending on the state of each future.
+    ///
+    /// The returned stream will be a stream of each future's result, with
+    /// errors passed through whenever they occur.
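+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only), assuming a channel whose
+    /// items are `Result`s and hence convertible into futures:
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx, rx) = channel::<Result<i32, u32>, u32>();
+    /// // At most four of the item-futures are buffered at any point in time.
+    /// let results = rx.buffered(4);
+    /// ```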
+    fn buffered(self, amt: usize) -> Buffered<Self>
+        where Self::Item: IntoFuture<Error = <Self as Stream>::Error>,
+              Self: Sized
+    {
+        buffered::new(self, amt)
+    }
+
+    /// An adapter for merging the output of two streams.
+    ///
+    /// The merged stream produces items from one or both of the underlying
+    /// streams as they become available. Errors, however, are not merged: you
+    /// get at most one error at a time.
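+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only); the two streams may have
+    /// different item types but must share an error type:
+    ///
+    /// ```
+    /// use futures::stream::*;
+    ///
+    /// let (_tx1, rx1) = channel::<i32, u32>();
+    /// let (_tx2, rx2) = channel::<u64, u32>();
+    /// // Items arrive as `MergedItem::First`, `MergedItem::Second`, or
+    /// // `MergedItem::Both`.
+    /// let both = rx1.merge(rx2);
+    /// ```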
+    fn merge<S>(self, other: S) -> Merge<Self, S>
+        where S: Stream<Error = Self::Error>,
+              Self: Sized,
+    {
+        merge::new(self, other)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/or_else.rs b/futures-rs-test-all/src/stream/or_else.rs
new file mode 100644
index 000000000..a26621992
--- /dev/null
+++ b/futures-rs-test-all/src/stream/or_else.rs
@@ -0,0 +1,58 @@
+use {Task, IntoFuture, Future, Poll};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto errors produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::or_else` method.
+pub struct OrElse<S, F, U>
+    where U: IntoFuture,
+{
+    stream: S,
+    future: Option<U::Future>,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> OrElse<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Error) -> U + Send + 'static,
+          U: IntoFuture<Item=S::Item>,
+{
+    OrElse {
+        stream: s,
+        future: None,
+        f: f,
+    }
+}
+
+impl<S, F, U> Stream for OrElse<S, F, U>
+    where S: Stream,
+          F: FnMut(S::Error) -> U + Send + 'static,
+          U: IntoFuture<Item=S::Item>,
+{
+    type Item = S::Item;
+    type Error = U::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<S::Item>, U::Error> {
+        if self.future.is_none() {
+            let item = match try_poll!(self.stream.poll(task)) {
+                Ok(e) => return Poll::Ok(e),
+                Err(e) => e,
+            };
+            self.future = Some((self.f)(item).into_future());
+        }
+        assert!(self.future.is_some());
+        let res = self.future.as_mut().unwrap().poll(task);
+        if res.is_ready() {
+            self.future = None;
+        }
+        res.map(Some)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.future {
+            Some(ref mut s) => s.schedule(task),
+            None => self.stream.schedule(task),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/skip.rs b/futures-rs-test-all/src/stream/skip.rs
new file mode 100644
index 000000000..81e9de8cd
--- /dev/null
+++ b/futures-rs-test-all/src/stream/skip.rs
@@ -0,0 +1,42 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A stream combinator which skips a number of elements before continuing.
+///
+/// This structure is produced by the `Stream::skip` method.
+pub struct Skip<S> {
+    stream: S,
+    remaining: u64,
+}
+
+pub fn new<S>(s: S, amt: u64) -> Skip<S>
+    where S: Stream,
+{
+    Skip {
+        stream: s,
+        remaining: amt,
+    }
+}
+
+impl<S> Stream for Skip<S>
+    where S: Stream,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<S::Item>, S::Error> {
+        while self.remaining > 0 {
+            match try_poll!(self.stream.poll(task)) {
+                Ok(Some(_)) => self.remaining -= 1,
+                Ok(None) => return Poll::Ok(None),
+                Err(e) => return Poll::Err(e),
+            }
+        }
+
+        self.stream.poll(task)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
diff --git a/futures-rs-test-all/src/stream/skip_while.rs b/futures-rs-test-all/src/stream/skip_while.rs
new file mode 100644
index 000000000..0c0997df3
--- /dev/null
+++ b/futures-rs-test-all/src/stream/skip_while.rs
@@ -0,0 +1,85 @@
+use {Task, Poll, IntoFuture, Future};
+use stream::Stream;
+
+/// A stream combinator which skips elements of a stream while a predicate
+/// holds.
+///
+/// This structure is produced by the `Stream::skip_while` method.
+pub struct SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
+    stream: S,
+    pred: P,
+    pending: Option<(R::Future, S::Item)>,
+    done_skipping: bool,
+}
+
+pub fn new<S, P, R>(s: S, p: P) -> SkipWhile<S, P, R>
+    where S: Stream,
+          P: FnMut(&S::Item) -> R + Send + 'static,
+          R: IntoFuture<Item=bool, Error=S::Error>,
+{
+    SkipWhile {
+        stream: s,
+        pred: p,
+        pending: None,
+        done_skipping: false,
+    }
+}
+
+impl<S, P, R> Stream for SkipWhile<S, P, R>
+    where S: Stream,
+          P: FnMut(&S::Item) -> R + Send + 'static,
+          R: IntoFuture<Item=bool, Error=S::Error>,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<S::Item>, S::Error> {
+        if self.done_skipping {
+            return self.stream.poll(task);
+        }
+
+        loop {
+            if self.pending.is_none() {
+                let item = match try_poll!(self.stream.poll(task)) {
+                    Ok(Some(e)) => e,
+                    Ok(None) => return Poll::Ok(None),
+                    Err(e) => return Poll::Err(e),
+                };
+                self.pending = Some(((self.pred)(&item).into_future(), item));
+            }
+
+            assert!(self.pending.is_some());
+            match try_poll!(self.pending.as_mut().unwrap().0.poll(task)) {
+                Ok(true) => self.pending = None,
+                Ok(false) => {
+                    let (_, item) = self.pending.take().unwrap();
+                    self.done_skipping = true;
+                    return Poll::Ok(Some(item))
+                }
+                Err(e) => {
+                    self.pending = None;
+                    return Poll::Err(e)
+                }
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.stream.schedule(task)
+    }
+}
+
+impl<S, P, R> SkipWhile<S, P, R>
+    where S: Stream,
+          P: FnMut(&S::Item) -> R + Send + 'static,
+          R: IntoFuture<Item=bool, Error=S::Error>,
+{
+    /// Consume this adaptor, returning the underlying stream.
+    ///
+    /// Note that if an element is buffered or a future is active determining
+    /// whether that element should be yielded they will both be dropped as part
+    /// of this operation.
+    pub fn into_inner(self) -> S {
+        self.stream
+    }
+}
diff --git a/futures-rs-test-all/src/stream/take.rs b/futures-rs-test-all/src/stream/take.rs
new file mode 100644
index 000000000..edded627c
--- /dev/null
+++ b/futures-rs-test-all/src/stream/take.rs
@@ -0,0 +1,48 @@
+use {Task, Poll};
+use stream::Stream;
+
+/// A stream combinator which returns a maximum number of elements.
+///
+/// This structure is produced by the `Stream::take` method.
+pub struct Take<S> {
+    stream: S,
+    remaining: u64,
+}
+
+pub fn new<S>(s: S, amt: u64) -> Take<S>
+    where S: Stream,
+{
+    Take {
+        stream: s,
+        remaining: amt,
+    }
+}
+
+impl<S> Stream for Take<S>
+    where S: Stream,
+{
+    type Item = S::Item;
+    type Error = S::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<S::Item>, S::Error> {
+        if self.remaining == 0 {
+            Poll::Ok(None)
+        } else {
+            match self.stream.poll(task) {
+                Poll::Ok(Some(e)) => {
+                    self.remaining -= 1;
+                    Poll::Ok(Some(e))
+                }
+                other => other,
+            }
+        }
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        if self.remaining == 0 {
+            task.notify()
+        } else {
+            self.stream.schedule(task)
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/then.rs b/futures-rs-test-all/src/stream/then.rs
new file mode 100644
index 000000000..67f46f538
--- /dev/null
+++ b/futures-rs-test-all/src/stream/then.rs
@@ -0,0 +1,59 @@
+use {Task, IntoFuture, Future, Poll};
+use stream::Stream;
+
+/// A stream combinator which chains a computation onto each item produced by a
+/// stream.
+///
+/// This structure is produced by the `Stream::then` method.
+pub struct Then<S, F, U>
+    where U: IntoFuture,
+{
+    stream: S,
+    future: Option<U::Future>,
+    f: F,
+}
+
+pub fn new<S, F, U>(s: S, f: F) -> Then<S, F, U>
+    where S: Stream,
+          F: FnMut(Result<S::Item, S::Error>) -> U + Send + 'static,
+          U: IntoFuture,
+{
+    Then {
+        stream: s,
+        future: None,
+        f: f,
+    }
+}
+
+impl<S, F, U> Stream for Then<S, F, U>
+    where S: Stream,
+          F: FnMut(Result<S::Item, S::Error>) -> U + Send + 'static,
+          U: IntoFuture,
+{
+    type Item = U::Item;
+    type Error = U::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<Option<U::Item>, U::Error> {
+        if self.future.is_none() {
+            let item = match try_poll!(self.stream.poll(task)) {
+                Ok(None) => return Poll::Ok(None),
+                Ok(Some(e)) => Ok(e),
+                Err(e) => Err(e),
+            };
+            self.future = Some((self.f)(item).into_future());
+        }
+        assert!(self.future.is_some());
+        let res = self.future.as_mut().unwrap().poll(task);
+        if res.is_ready() {
+            self.future = None;
+        }
+        res.map(Some)
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        match self.future {
+            Some(ref mut s) => s.schedule(task),
+            None => self.stream.schedule(task),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/stream/thread_state.rs b/futures-rs-test-all/src/stream/thread_state.rs
new file mode 100644
index 000000000..0f6f8f211
--- /dev/null
+++ b/futures-rs-test-all/src/stream/thread_state.rs
@@ -0,0 +1,46 @@
+use std::sync::Arc;
+
+use {Wake, Tokens};
+use stream::{Stream, StreamResult};
+
+pub struct ThreadState<S, State, F> {
+    stream: S,
+    state: State,
+    f: F,
+}
+
+pub fn new<S, State, F, U>(s: S, state: State, f: F) -> ThreadState<S, State, F>
+    where S: Stream,
+          F: FnMut(&mut State, S::Item) -> U + Send + 'static,
+          U: Send + 'static,
+          State: Send + 'static
+{
+    ThreadState {
+        stream: s,
+        state: state,
+        f: f,
+    }
+}
+
+impl<S, State, F, U> Stream for ThreadState<S, State, F>
+    where S: Stream,
+          F: FnMut(&mut State, S::Item) -> U + Send + 'static,
+          U: Send + 'static,
+          State: Send + 'static
+{
+    type Item = U;
+    type Error = S::Error;
+
+    fn poll(&mut self, tokens: &Tokens) -> Option<StreamResult<U, S::Error>> {
+        match self.stream.poll(tokens) {
+            Some(Ok(Some(e))) => Some(Ok(Some((self.f)(&mut self.state, e)))),
+            Some(Ok(None)) => Some(Ok(None)),
+            Some(Err(e)) => Some(Err(e)),
+            None => None,
+        }
+    }
+
+    fn schedule(&mut self, wake: &Arc<Wake>) {
+        self.stream.schedule(wake)
+    }
+}
diff --git a/futures-rs-test-all/src/task.rs b/futures-rs-test-all/src/task.rs
new file mode 100644
index 000000000..d383cdee9
--- /dev/null
+++ b/futures-rs-test-all/src/task.rs
@@ -0,0 +1,316 @@
+
+// One critical piece of this module's contents are the `TaskData<A>` handles.
+// The purpose of this is to conceptually be able to store data in a task,
+// allowing it to be accessed within multiple futures at once. For example if
+// you have some concurrent futures working, they may all want mutable access to
+// some data. We already know that when the futures are being poll'd that we're
+// entirely synchronized (aka `&mut Task`), so you shouldn't require an
+// `Arc<Mutex<T>>` to share as the synchronization isn't necessary!
+//
+// So the idea here is that you insert data into a task via `Task::insert`, and
+// a handle to that data is then returned to you. That handle can later get
+// presented to the task itself to actually retrieve the underlying data. The
+// invariant is that the data can only ever be accessed with the task present,
+// and the lifetime of the actual data returned is connected to the lifetime of
+// the task itself.
+//
+// Conceptually I at least like to think of this as "dynamically adding more
+// struct fields to a `Task`". Each call to insert creates a new "name" for the
+// struct field, a `TaskData<A>`, and then you can access the fields of a struct
+// with the struct itself (`Task`) as well as the name of the field
+// (`TaskData<A>`). If that analogy doesn't make sense then oh well, it at least
+// helped me!
+//
+// So anyway, we do some interesting trickery here to actually get it to work.
+// Each `TaskData<A>` handle stores `Arc<UnsafeCell<A>>`. So it turns out, we're
+// not even adding data to the `Task`! Each `TaskData<A>` contains a reference
+// to this `Arc`, and `TaskData` handles can be cloned which just bumps the
+// reference count on the `Arc` itself.
+//
+// As before, though, you can present the `Arc` to a `Task` and if they
+// originated from the same place you're allowed safe access to the internals.
+// We allow both shared and mutable access without the `Sync` bound on the data,
+// crucially noting that a `Task` itself is not `Sync`.
+//
+// So hopefully I've convinced you of this point that the `get` and `get_mut`
+// methods below are indeed safe. The data is always valid as it's stored in an
+// `Arc`, and access is only allowed with the proof of the associated `Task`.
+// One thing you might be asking yourself though is what exactly is this "proof
+// of a task"? Right now it's a `usize` corresponding to the `Task`'s
+// `TaskHandle` arc allocation.
+//
+// Wait a minute, isn't that the ABA problem! That is, we create a task A, add
+// some data to it, destroy task A, do some work, create a task B, and then ask
+// to get the data from task B. In this case though the point of the
+// `task_inner` "proof" field is simply that there's some non-`Sync` token
+// proving that you can get access to the data. So while weird, this case should
+// still be safe, as the data's not stored in the task itself.
+
+use std::any::Any;
+use std::cell::{UnsafeCell, Cell};
+use std::marker;
+use std::panic;
+use std::sync::Arc;
+use std::sync::atomic::{AtomicUsize, AtomicBool, ATOMIC_USIZE_INIT, Ordering};
+use std::thread;
+
+use Future;
+use executor::{DEFAULT, Executor};
+use slot::Slot;
+
+/// A structure representing one "task", or thread of execution throughout the
+/// lifetime of a set of futures.
+///
+/// It's intended that futures are composed together to form a large "task" of
+/// futures which is driven as a whole throughout its lifetime. This task is
+/// persistent for the entire lifetime of the future until its completion,
+/// carrying any local data and such.
+///
+/// Currently tasks serve two primary purposes:
+///
+/// * They're used to drive futures to completion, e.g. executors (more to be
+///   changed here soon).
+/// * They store task local data. That is, any task can contain any number of
+///   pieces of arbitrary data which can be accessed at a later date. The data
+///   is owned and carried in the task itself, and `TaskData` handles are used
+///   to access the internals.
+///
+/// This structure is likely to expand more customizable functionality over
+/// time! That is, it's not quite done yet...
+pub struct Task {
+    handle: TaskHandle,
+
+    // A `Task` is not `Sync`, see the docs above.
+    _marker: marker::PhantomData<Cell<()>>,
+}
+
+/// A handle to a task that can be sent to other threads.
+///
+/// Created by the `Task::handle` method.
+#[derive(Clone)]
+pub struct TaskHandle {
+    inner: Arc<Inner>,
+}
+
+struct Inner {
+    slot: Slot<(Task, Box<Future<Item=(), Error=()>>)>,
+    registered: AtomicBool,
+}
+
+/// A reference to a piece of data that's stored inside of a `Task`.
+///
+/// This can be used with the `Task::get` and `Task::get_mut` methods to access
+/// data inside of tasks.
+pub struct TaskData<A> {
+    task_inner: usize,
+    ptr: Arc<UnsafeCell<A>>,
+}
+
+// for safety here, see docs at the top of this module
+unsafe impl<A: Send> Send for TaskData<A> {}
+unsafe impl<A: Sync> Sync for TaskData<A> {}
+
+impl Task {
+    /// Creates a new task ready to drive a future.
+    pub fn new() -> Task {
+        Task {
+            handle: TaskHandle {
+                inner: Arc::new(Inner {
+                    slot: Slot::new(None),
+                    registered: AtomicBool::new(false),
+                }),
+            },
+            _marker: marker::PhantomData,
+        }
+    }
+
+    /// Inserts a new piece of task-local data into this task, returning a
+    /// reference to it.
+    ///
+    /// Ownership of the data will be transferred to the task, and the data will
+    /// be destroyed when the task itself is destroyed. The returned value can
+    /// be passed to the `Task::{get, get_mut}` methods to get a reference back
+    /// to the original data.
+    ///
+    /// Note that the returned handle is cloneable and can be sent
+    /// to other futures which will be associated with the same task. All
+    /// futures will then have access to this data when passed the reference
+    /// back.
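+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only):
+    ///
+    /// ```
+    /// use futures::Task;
+    ///
+    /// let mut task = Task::new();
+    /// // Store a value in the task and get back a handle naming that data.
+    /// let handle = task.insert(5);
+    /// assert_eq!(*task.get(&handle), 5);
+    /// *task.get_mut(&handle) += 1;
+    /// assert_eq!(*task.get(&handle), 6);
+    /// ```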
+    pub fn insert<A>(&mut self, a: A) -> TaskData<A>
+        where A: Any + Send + 'static,
+    {
+        TaskData {
+            task_inner: self.inner_usize(),
+            ptr: Arc::new(UnsafeCell::new(a)),
+        }
+    }
+
+    fn inner_usize(&self) -> usize {
+        &*self.handle.inner as *const Inner as usize
+    }
+
+    /// Get a reference to the task-local data inside this task.
+    ///
+    /// This method should be passed a handle previously returned by
+    /// `Task::insert`. That handle, when passed back into this method, will
+    /// retrieve a reference to the original data.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `data` does not belong to this task. That is,
+    /// if another task generated the `data` handle passed in, this method will
+    /// panic.
+    pub fn get<A>(&self, data: &TaskData<A>) -> &A {
+        // for safety here, see docs at the top of this module
+        assert_eq!(data.task_inner, self.inner_usize());
+        unsafe { &*data.ptr.get() }
+    }
+
+    /// Get a mutable reference to the task-local data inside this task.
+    ///
+    /// This method should be passed a handle previously returned by
+    /// `Task::insert`. That handle, when passed back into this method, will
+    /// retrieve a reference to the original data.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `data` does not belong to this task. That is,
+    /// if another task generated the `data` handle passed in, this method will
+    /// panic.
+    pub fn get_mut<A>(&mut self, data: &TaskData<A>) -> &mut A {
+        // for safety here, see docs at the top of this module
+        assert_eq!(data.task_inner, self.inner_usize());
+        unsafe { &mut *data.ptr.get() }
+    }
+
+    /// During the `Future::schedule` method, notify the task that a value is
+    /// immediately ready.
+    ///
+    /// This method, more optimized than `TaskHandle::notify`, will inform the
+    /// task that the future which is being scheduled is immediately ready to be
+    /// `poll`ed again.
+    pub fn notify(&mut self) {
+        // TODO: optimize this, we've got mutable access so no need for atomics
+        self.handle().notify()
+    }
+
+    /// Gets a handle to this task which can be cloned to a piece of
+    /// `Send+'static` data.
+    ///
+    /// This handle returned can be used to notify the task that a future is
+    /// ready to get polled again. The returned handle implements the `Clone`
+    /// trait and all clones will refer to this same task.
+    ///
+    /// Note that if data is immediately ready then the `Task::notify` method
+    /// should be preferred.
+    pub fn handle(&self) -> &TaskHandle {
+        &self.handle
+    }
+
+    /// Consumes this task to run a future to completion.
+    ///
+    /// This function will consume the task provided and the task will be used
+    /// to execute the `future` provided until it has been completed. The future
+    /// will be `poll`ed until it is resolved, at which point the `Result<(),
+    /// ()>` will be discarded.
+    ///
+    /// The future will be `poll`ed on the threads that events arrive on. That
+    /// is, this method does not attempt to control which thread a future is
+    /// polled on.
+    ///
+    /// # Panics
+    ///
+    /// Currently, if `poll` panics, then this method will propagate the panic
+    /// to the thread that `poll` was called on. This is bad and it will change.
+    pub fn run(self, mut future: Box<Future<Item=(), Error=()>>) {
+        let mut me = self;
+        loop {
+            // Note that we need to poll at least once as the wake callback may
+            // have received an empty set of tokens, but that's still a valid
+            // reason to poll a future.
+            let result = catch_unwind(move || {
+                (future.poll(&mut me), future, me)
+            });
+            match result {
+                Ok((ref r, _, _)) if r.is_ready() => return,
+                Ok((_, f, t)) => {
+                    future = f;
+                    me = t;
+                }
+                // TODO: this error probably wants to get communicated to
+                //       another closure in one way or another, or perhaps if
+                //       nothing is registered the panic propagates.
+                Err(e) => panic::resume_unwind(e),
+            }
+            future = match future.tailcall() {
+                Some(f) => f,
+                None => future,
+            };
+            break
+        }
+
+        // Ok, we've seen that there are no tokens which show interest in the
+        // future. Schedule interest on the future for when something is ready
+        // and then relinquish the future and the task back to the slot, which
+        // will then pick it up once a wake callback has fired.
+        future.schedule(&mut me);
+
+        let inner = me.handle.inner.clone();
+        inner.slot.try_produce((me, future)).ok().unwrap();
+    }
+}
+
+fn catch_unwind<F, U>(f: F) -> thread::Result<U>
+    where F: FnOnce() -> U + Send + 'static,
+{
+    panic::catch_unwind(panic::AssertUnwindSafe(f))
+}
+
+impl TaskHandle {
+    /// Returns whether this task handle and another point to the same task.
+    ///
+    /// In other words, this method returns whether `notify` would end up
+    /// notifying the same task. If two task handles need to be notified but
+    /// they are equivalent, then only one needs to be actually notified.
+    pub fn equivalent(&self, other: &TaskHandle) -> bool {
+        &*self.inner as *const _ == &*other.inner as *const _
+    }
+
+    /// Notify the associated task that a future is ready to get polled.
+    ///
+    /// Futures should use this method to ensure that when a future can make
+    /// progress, the `Task` is notified so that it will continue to `poll` the
+    /// future at a later date.
+    ///
+    /// Currently it's guaranteed that if `notify` is called that `poll` will be
+    /// scheduled to get called at some point in the future. A `poll` may
+    /// already be running on another thread, but this will ensure that a poll
+    /// happens again to receive this notification.
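+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (illustrative only):
+    ///
+    /// ```
+    /// use futures::Task;
+    ///
+    /// let task = Task::new();
+    /// // Handles implement `Clone` and can be sent to other threads.
+    /// let handle = task.handle().clone();
+    /// handle.notify();
+    /// ```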
+    pub fn notify(&self) {
+        // First, see if we can actually register an `on_full` callback. The
+        // `Slot` requires that only one registration happens, and this flag
+        // guards that.
+        if self.inner.registered.swap(true, Ordering::SeqCst) {
+            return
+        }
+
+        // If we won the race to register a callback, do so now. Once the slot
+        // is resolved we allow another registration **before we poll again**.
+        // This allows any future which may be somewhat badly behaved to be
+        // compatible with this.
+        self.inner.slot.on_full(|slot| {
+            let (task, future) = slot.try_consume().ok().unwrap();
+            task.handle.inner.registered.store(false, Ordering::SeqCst);
+            DEFAULT.execute(|| task.run(future))
+        });
+    }
+}
+
+impl<A> Clone for TaskData<A> {
+    fn clone(&self) -> TaskData<A> {
+        TaskData {
+            task_inner: self.task_inner,
+            ptr: self.ptr.clone(),
+        }
+    }
+}
diff --git a/futures-rs-test-all/src/then.rs b/futures-rs-test-all/src/then.rs
new file mode 100644
index 000000000..9d5fce14f
--- /dev/null
+++ b/futures-rs-test-all/src/then.rs
@@ -0,0 +1,44 @@
+use {Future, IntoFuture, Task, Poll};
+use chain::Chain;
+
+/// Future for the `then` combinator, chaining computations on the end of
+/// another future regardless of its outcome.
+///
+/// This is created by the `Future::then` method.
+pub struct Then<A, B, F> where A: Future, B: IntoFuture {
+    state: Chain<A, B::Future, F>,
+}
+
+pub fn new<A, B, F>(future: A, f: F) -> Then<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+          F: Send + 'static,
+{
+    Then {
+        state: Chain::new(future, f),
+    }
+}
+
+impl<A, B, F> Future for Then<A, B, F>
+    where A: Future,
+          B: IntoFuture,
+          F: FnOnce(Result<A::Item, A::Error>) -> B + Send + 'static,
+{
+    type Item = B::Item;
+    type Error = B::Error;
+
+    fn poll(&mut self, task: &mut Task) -> Poll<B::Item, B::Error> {
+        self.state.poll(task, |a, f| {
+            Ok(Err(f(a).into_future()))
+        })
+    }
+
+    fn schedule(&mut self, task: &mut Task) {
+        self.state.schedule(task)
+    }
+
+    fn tailcall(&mut self)
+                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
+        self.state.tailcall()
+    }
+}
diff --git a/futures-rs-test-all/src/util.rs b/futures-rs-test-all/src/util.rs
new file mode 100644
index 000000000..1045bed31
--- /dev/null
+++ b/futures-rs-test-all/src/util.rs
@@ -0,0 +1,40 @@
+use {Future, Task, Poll};
+
+pub enum Collapsed<T: Future> {
+    Start(T),
+    Tail(Box<Future<Item=T::Item, Error=T::Error>>),
+}
+
+impl<T: Future> Collapsed<T> {
+    pub fn poll(&mut self, task: &mut Task) -> Poll<T::Item, T::Error> {
+        match *self {
+            Collapsed::Start(ref mut a) => a.poll(task),
+            Collapsed::Tail(ref mut a) => a.poll(task),
+        }
+    }
+
+    pub fn schedule(&mut self, task: &mut Task) {
+        match *self {
+            Collapsed::Start(ref mut a) => a.schedule(task),
+            Collapsed::Tail(ref mut a) => a.schedule(task),
+        }
+    }
+
+    pub fn collapse(&mut self) {
+        let a = match *self {
+            Collapsed::Start(ref mut a) => {
+                match a.tailcall() {
+                    Some(a) => a,
+                    None => return,
+                }
+            }
+            Collapsed::Tail(ref mut a) => {
+                if let Some(b) = a.tailcall() {
+                    *a = b;
+                }
+                return
+            }
+        };
+        *self = Collapsed::Tail(a);
+    }
+}
diff --git a/futures-rs-test-all/tests/all.rs b/futures-rs-test-all/tests/all.rs
new file mode 100644
index 000000000..599c4d727
--- /dev/null
+++ b/futures-rs-test-all/tests/all.rs
@@ -0,0 +1,352 @@
+extern crate futures;
+
+use std::sync::mpsc::{channel, TryRecvError};
+
+use futures::*;
+
+mod support;
+use support::*;
+
+fn unselect<T, U, E>(r: Result<(T, U), (E, U)>) -> Result<T, E> {
+    match r {
+        Ok((t, _)) => Ok(t),
+        Err((e, _)) => Err(e),
+    }
+}
+
+#[test]
+fn result_smoke() {
+    fn is_future_v<A, B, C>(_: C)
+        where A: Send + 'static,
+              B: Send + 'static,
+              C: Future<Item=A, Error=B>
+    {}
+
+    is_future_v::<i32, u32, _>(f_ok(1).map(|a| a + 1));
+    is_future_v::<i32, u32, _>(f_ok(1).map_err(|a| a + 1));
+    is_future_v::<i32, u32, _>(f_ok(1).and_then(|a| Ok(a)));
+    is_future_v::<i32, u32, _>(f_ok(1).or_else(|a| Err(a)));
+    is_future_v::<(i32, i32), u32, _>(f_ok(1).join(Err(3)));
+    is_future_v::<i32, u32, _>(f_ok(1).map(move |a| f_ok(a)).flatten());
+
+    assert_done(|| f_ok(1), ok(1));
+    assert_done(|| f_err(1), err(1));
+    assert_done(|| done(Ok(1)), ok(1));
+    assert_done(|| done(Err(1)), err(1));
+    assert_done(|| finished(1), ok(1));
+    assert_done(|| failed(1), err(1));
+    assert_done(|| f_ok(1).map(|a| a + 2), ok(3));
+    assert_done(|| f_err(1).map(|a| a + 2), err(1));
+    assert_done(|| f_ok(1).map_err(|a| a + 2), ok(1));
+    assert_done(|| f_err(1).map_err(|a| a + 2), err(3));
+    assert_done(|| f_ok(1).and_then(|a| Ok(a + 2)), ok(3));
+    assert_done(|| f_err(1).and_then(|a| Ok(a + 2)), err(1));
+    assert_done(|| f_ok(1).and_then(|a| Err(a as u32 + 3)), err(4));
+    assert_done(|| f_err(1).and_then(|a| Err(a as u32 + 4)), err(1));
+    assert_done(|| f_ok(1).or_else(|a| Ok(a as i32 + 2)), ok(1));
+    assert_done(|| f_err(1).or_else(|a| Ok(a as i32 + 2)), ok(3));
+    assert_done(|| f_ok(1).or_else(|a| Err(a + 3)), ok(1));
+    assert_done(|| f_err(1).or_else(|a| Err(a + 4)), err(5));
+    assert_done(|| f_ok(1).select(f_err(2)).then(unselect), ok(1));
+    assert_done(|| f_ok(1).select(Ok(2)).then(unselect), ok(1));
+    assert_done(|| f_err(1).select(f_ok(1)).then(unselect), err(1));
+    assert_done(|| f_ok(1).select(empty()).then(unselect), Ok(1));
+    assert_done(|| empty().select(f_ok(1)).then(unselect), Ok(1));
+    assert_done(|| f_ok(1).join(f_err(1)), Err(1));
+    assert_done(|| f_ok(1).join(Ok(2)), Ok((1, 2)));
+    assert_done(|| f_err(1).join(f_ok(1)), Err(1));
+    assert_done(|| f_ok(1).then(|_| Ok(2)), ok(2));
+    assert_done(|| f_ok(1).then(|_| Err(2)), err(2));
+    assert_done(|| f_err(1).then(|_| Ok(2)), ok(2));
+    assert_done(|| f_err(1).then(|_| Err(2)), err(2));
+}
+
+#[test]
+fn test_empty() {
+    fn empty() -> Empty<i32, u32> { futures::empty() }
+
+    assert_empty(|| empty());
+    assert_empty(|| empty().select(empty()));
+    assert_empty(|| empty().join(empty()));
+    assert_empty(|| empty().join(f_ok(1)));
+    assert_empty(|| f_ok(1).join(empty()));
+    assert_empty(|| empty().or_else(move |_| empty()));
+    assert_empty(|| empty().and_then(move |_| empty()));
+    assert_empty(|| f_err(1).or_else(move |_| empty()));
+    assert_empty(|| f_ok(1).and_then(move |_| empty()));
+    assert_empty(|| empty().map(|a| a + 1));
+    assert_empty(|| empty().map_err(|a| a + 1));
+    assert_empty(|| empty().then(|a| a));
+}
+
+#[test]
+fn test_finished() {
+    assert_done(|| finished(1), ok(1));
+    assert_done(|| failed(1), err(1));
+}
+
+#[test]
+fn flatten() {
+    fn finished<T: Send + 'static>(a: T) -> Finished<T, u32> {
+        futures::finished(a)
+    }
+    fn failed<E: Send + 'static>(b: E) -> Failed<i32, E> {
+        futures::failed(b)
+    }
+
+    assert_done(|| finished(finished(1)).flatten(), ok(1));
+    assert_done(|| finished(failed(1)).flatten(), err(1));
+    assert_done(|| failed(1u32).map(finished).flatten(), err(1));
+    assert_done(|| futures::finished::<_, u8>(futures::finished::<_, u32>(1))
+                           .flatten(), ok(1));
+    assert_empty(|| finished(empty::<i32, u32>()).flatten());
+    assert_empty(|| empty::<i32, u32>().map(finished).flatten());
+}
+
+#[test]
+fn smoke_promise() {
+    assert_done(|| {
+        let (c, p) = promise();
+        c.complete(1);
+        p
+    }, Ok(1));
+    assert_done(|| {
+        let (c, p) = promise::<i32>();
+        drop(c);
+        p
+    }, Err(Canceled));
+    let mut completes = Vec::new();
+    assert_empty(|| {
+        let (a, b) = promise::<i32>();
+        completes.push(a);
+        b
+    });
+
+    let (c, mut p) = promise::<i32>();
+    drop(c);
+    assert!(p.poll(&mut Task::new()).unwrap().is_err());
+    let (c, p) = promise::<i32>();
+    drop(c);
+    let (tx, rx) = channel();
+    Task::new().run(p.then(move |_| {
+        tx.send(()).unwrap();
+        Ok(())
+    }).boxed());
+    rx.recv().unwrap();
+}
+
+#[test]
+fn select_cancels() {
+    let ((a, b), (c, d)) = (promise::<i32>(), promise::<i32>());
+    let ((btx, brx), (dtx, drx)) = (channel(), channel());
+    let b = b.map(move |b| { btx.send(b).unwrap(); b });
+    let d = d.map(move |d| { dtx.send(d).unwrap(); d });
+
+    let mut f = b.select(d).then(unselect);
+    // assert!(f.poll(&mut Task::new()).is_not_ready());
+    assert!(brx.try_recv().is_err());
+    assert!(drx.try_recv().is_err());
+    a.complete(1);
+    assert!(f.poll(&mut Task::new()).is_ready());
+    assert_eq!(brx.recv().unwrap(), 1);
+    drop((c, f));
+    assert!(drx.recv().is_err());
+
+    let ((a, b), (c, d)) = (promise::<i32>(), promise::<i32>());
+    let ((btx, _brx), (dtx, drx)) = (channel(), channel());
+    let b = b.map(move |b| { btx.send(b).unwrap(); b });
+    let d = d.map(move |d| { dtx.send(d).unwrap(); d });
+
+    let mut f = b.select(d).then(unselect);
+    let mut task = Task::new();
+    assert!(f.poll(&mut task).is_not_ready());
+    f.schedule(&mut task);
+    assert!(f.poll(&mut task).is_not_ready());
+    a.complete(1);
+    assert!(f.poll(&mut task).is_ready());
+    drop((c, f));
+    assert!(drx.recv().is_err());
+}
+
+#[test]
+fn join_cancels() {
+    let ((a, b), (c, d)) = (promise::<i32>(), promise::<i32>());
+    let ((btx, _brx), (dtx, drx)) = (channel(), channel());
+    let b = b.map(move |b| { btx.send(b).unwrap(); b });
+    let d = d.map(move |d| { dtx.send(d).unwrap(); d });
+
+    let mut f = b.join(d);
+    drop(a);
+    assert!(f.poll(&mut Task::new()).is_ready());
+    drop((c, f));
+    assert!(drx.recv().is_err());
+
+    let ((a, b), (c, d)) = (promise::<i32>(), promise::<i32>());
+    let ((btx, _brx), (dtx, drx)) = (channel(), channel());
+    let b = b.map(move |b| { btx.send(b).unwrap(); b });
+    let d = d.map(move |d| { dtx.send(d).unwrap(); d });
+
+    let (tx, rx) = channel();
+    let f = b.join(d);
+    Task::new().run(f.then(move |_| {
+        tx.send(()).unwrap();
+        Ok(())
+    }).boxed());
+    assert!(rx.try_recv().is_err());
+    drop(a);
+    rx.recv().unwrap();
+    drop(c);
+    assert!(drx.recv().is_err());
+}
+
+#[test]
+fn join_incomplete() {
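+    // A join is not ready until both halves resolve; the promise side may
+    // complete (or be cancelled) after the combined future has been handed
+    // off with `forget()`.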
+    let (a, b) = promise::<i32>();
+    let mut f = finished(1).join(b);
+    assert!(f.poll(&mut Task::new()).is_not_ready());
+    let (tx, rx) = channel();
+    f.map(move |r| tx.send(r).unwrap()).forget();
+    assert!(rx.try_recv().is_err());
+    a.complete(2);
+    assert_eq!(rx.recv().unwrap(), (1, 2));
+
+    let (a, b) = promise::<i32>();
+    let mut f = b.join(Ok(2));
+    assert!(f.poll(&mut Task::new()).is_not_ready());
+    let (tx, rx) = channel();
+    f.map(move |r| tx.send(r).unwrap()).forget();
+    assert!(rx.try_recv().is_err());
+    a.complete(1);
+    assert_eq!(rx.recv().unwrap(), (1, 2));
+
+    let (a, b) = promise::<i32>();
+    let mut f = finished(1).join(b);
+    assert!(f.poll(&mut Task::new()).is_not_ready());
+    let (tx, rx) = channel();
+    f.map_err(move |_r| tx.send(2).unwrap()).forget();
+    assert!(rx.try_recv().is_err());
+    drop(a);
+    assert_eq!(rx.recv().unwrap(), 2);
+
+    let (a, b) = promise::<i32>();
+    let mut f = b.join(Ok(2));
+    assert!(f.poll(&mut Task::new()).is_not_ready());
+    let (tx, rx) = channel();
+    f.map_err(move |_r| tx.send(1).unwrap()).forget();
+    assert!(rx.try_recv().is_err());
+    drop(a);
+    assert_eq!(rx.recv().unwrap(), 1);
+}
+
+#[test]
+fn collect_collects() {
+    assert_done(|| collect(vec![f_ok(1), f_ok(2)]), Ok(vec![1, 2]));
+    assert_done(|| collect(vec![f_ok(1)]), Ok(vec![1]));
+    assert_done(|| collect(Vec::<Result<i32, u32>>::new()), Ok(vec![]));
+
+    // TODO: needs more tests
+}
+
+#[test]
+fn select2() {
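+    // Collapse a select result to the winning side's value or error,
+    // discarding the future for the other half.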
+    fn d<T, U, E>(r: Result<(T, U), (E, U)>) -> Result<T, E> {
+        match r {
+            Ok((t, _u)) => Ok(t),
+            Err((e, _u)) => Err(e),
+        }
+    }
+
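+    // Whichever side is already complete wins, regardless of argument order
+    // or whether it finished with a value or an error.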
+    assert_done(|| f_ok(2).select(empty()).then(d), Ok(2));
+    assert_done(|| empty().select(f_ok(2)).then(d), Ok(2));
+    assert_done(|| f_err(2).select(empty()).then(d), Err(2));
+    assert_done(|| empty().select(f_err(2)).then(d), Err(2));
+
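+    // When both sides are ready, the winner's value comes paired with a
+    // future for the other side, which can still be awaited.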
+    assert_done(|| {
+        f_ok(1).select(f_ok(2))
+               .map_err(|_| 0)
+               .and_then(|(a, b)| b.map(move |b| a + b))
+    }, Ok(3));
+
+    // Finish one half of a select and then fail the second, ensuring that we
+    // get the notification of the second one.
+    {
+        let ((a, b), (c, d)) = (promise::<i32>(), promise::<i32>());
+        let f = b.select(d);
+        let (tx, rx) = channel();
+        f.map(move |r| tx.send(r).unwrap()).forget();
+        a.complete(1);
+        let (val, next) = rx.recv().unwrap();
+        assert_eq!(val, 1);
+        let (tx, rx) = channel();
+        next.map_err(move |_r| tx.send(2).unwrap()).forget();
+        assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty);
+        drop(c);
+        assert_eq!(rx.recv().unwrap(), 2);
+    }
+
+    // Fail the second half and ensure that we see the first one finish
+    {
+        let ((a, b), (c, d)) = (promise::<i32>(), promise::<i32>());
+        let f = b.select(d);
+        let (tx, rx) = channel();
+        f.map_err(move |r| tx.send((1, r.1)).unwrap()).forget();
+        drop(c);
+        let (val, next) = rx.recv().unwrap();
+        assert_eq!(val, 1);
+        let (tx, rx) = channel();
+        next.map(move |r| tx.send(r).unwrap()).forget();
+        assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty);
+        a.complete(2);
+        assert_eq!(rx.recv().unwrap(), 2);
+    }
+
+    // Cancelling the first half should cancel the second
+    {
+        let ((_a, b), (_c, d)) = (promise::<i32>(), promise::<i32>());
+        let ((btx, brx), (dtx, drx)) = (channel(), channel());
+        let b = b.map(move |v| { btx.send(v).unwrap(); v });
+        let d = d.map(move |v| { dtx.send(v).unwrap(); v });
+        let f = b.select(d);
+        drop(f);
+        assert!(drx.recv().is_err());
+        assert!(brx.recv().is_err());
+    }
+
+    // Cancel after a schedule
+    {
+        let ((_a, b), (_c, d)) = (promise::<i32>(), promise::<i32>());
+        let ((btx, brx), (dtx, drx)) = (channel(), channel());
+        let b = b.map(move |v| { btx.send(v).unwrap(); v });
+        let d = d.map(move |v| { dtx.send(v).unwrap(); v });
+        let mut f = b.select(d);
+        f.schedule(&mut Task::new());
+        drop(f);
+        assert!(drx.recv().is_err());
+        assert!(brx.recv().is_err());
+    }
+
+    // Cancel propagates
+    {
+        let ((a, b), (_c, d)) = (promise::<i32>(), promise::<i32>());
+        let ((btx, brx), (dtx, drx)) = (channel(), channel());
+        let b = b.map(move |v| { btx.send(v).unwrap(); v });
+        let d = d.map(move |v| { dtx.send(v).unwrap(); v });
+        let (tx, rx) = channel();
+        b.select(d).map(move |_| tx.send(()).unwrap()).forget();
+        drop(a);
+        assert!(drx.recv().is_err());
+        assert!(brx.recv().is_err());
+        assert!(rx.recv().is_err());
+    }
+
+    // Cancel on early drop
+    {
+        let (tx, rx) = channel();
+        let f = f_ok(1).select(empty().map(move |()| {
+            tx.send(()).unwrap();
+            1
+        }));
+        drop(f);
+        assert!(rx.recv().is_err());
+    }
+}
diff --git a/futures-rs-test-all/tests/support/mod.rs b/futures-rs-test-all/tests/support/mod.rs
new file mode 100644
index 000000000..fe45f2f2e
--- /dev/null
+++ b/futures-rs-test-all/tests/support/mod.rs
@@ -0,0 +1,60 @@
+#![allow(dead_code)]
+
+use std::fmt;
+use futures::*;
+use futures::stream::Stream;
+
+pub fn f_ok(a: i32) -> Done<i32, u32> { Ok(a).into_future() }
+pub fn f_err(a: u32) -> Done<i32, u32> { Err(a).into_future() }
+pub fn ok(a: i32) -> Result<i32, u32> { Ok(a) }
+pub fn err(a: u32) -> Result<i32, u32> { Err(a) }
+
+pub fn assert_done<T, F>(mut f: F, result: Result<T::Item, T::Error>)
+    where T: Future,
+          T::Item: Eq + fmt::Debug,
+          T::Error: Eq + fmt::Debug,
+          F: FnMut() -> T,
+{
+    let mut a = f();
+    assert_eq!(&a.poll(&mut Task::new()).unwrap(), &result);
+    drop(a);
+}
+
+pub fn assert_empty<T: Future, F: FnMut() -> T>(mut f: F) {
+    assert!(f().poll(&mut Task::new()).is_not_ready());
+
+    let mut a = f();
+    let mut task = Task::new();
+    a.schedule(&mut task);
+    assert!(a.poll(&mut task).is_not_ready());
+    drop(a);
+}
+
+pub fn sassert_done<S: Stream>(s: &mut S) {
+    match s.poll(&mut Task::new()) {
+        Poll::Ok(None) => {}
+        Poll::Ok(Some(_)) => panic!("stream had more elements"),
+        Poll::Err(_) => panic!("stream had an error"),
+        Poll::NotReady => panic!("stream wasn't ready"),
+    }
+}
+
+pub fn sassert_empty<S: Stream>(s: &mut S) {
+    match s.poll(&mut Task::new()) {
+        Poll::Ok(None) => panic!("stream is at its end"),
+        Poll::Ok(Some(_)) => panic!("stream had more elements"),
+        Poll::Err(_) => panic!("stream had an error"),
+        Poll::NotReady => {}
+    }
+}
+
+pub fn sassert_next<S: Stream>(s: &mut S, item: S::Item)
+    where S::Item: Eq + fmt::Debug
+{
+    match s.poll(&mut Task::new()) {
+        Poll::Ok(None) => panic!("stream is at its end"),
+        Poll::Ok(Some(e)) => assert_eq!(e, item),
+        Poll::Err(_) => panic!("stream had an error"),
+        Poll::NotReady => panic!("stream wasn't ready"),
+    }
+}