diff --git a/README.md b/README.md
index 7d6de4dd0dc..587a248ccc7 100644
--- a/README.md
+++ b/README.md
@@ -54,6 +54,17 @@ almost always be binary compatible with prior minor releases from the same major
 Patch 5.x.y increments (such as 5.0.0 -> 5.0.1, 5.1.1 -> 5.1.2, etc) will occur for bug fixes only and will always be binary compatible
 with prior patch releases of the same minor release branch.
 
+#### @Alpha
+
+APIs marked with the `@Alpha` annotation are in the early stages of development, subject to incompatible changes, 
+or even removal, in a future release and may lack some intended features. An API bearing the `@Alpha` annotation may 
+contain known issues affecting functionality, performance, and stability. Such APIs are also exempt from any compatibility 
+guarantees made by their containing library.
+
+It is inadvisable for <i>applications</i> to use Alpha APIs in production environments or for <i>libraries</i>
+(which get included on users' CLASSPATHs, outside the library developers' control) to depend on these APIs. Alpha APIs
+are intended for <b>experimental purposes</b> only.
+
 #### @Beta
 
 APIs marked with the `@Beta` annotation at the class or method level are subject to change. They can be modified in any way, or even
diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES
index 200d5d3803a..7229bf71926 100644
--- a/THIRD-PARTY-NOTICES
+++ b/THIRD-PARTY-NOTICES
@@ -37,7 +37,10 @@ https://github.com/mongodb/mongo-java-driver.
     See the License for the specific language governing permissions and
     limitations under the License.
 
-3) The following files: Beta.java
+3) The following files:
+
+    Alpha.java (formerly Beta.java)
+    Beta.java
 
     Copyright 2010 The Guava Authors
     Copyright 2011 The Guava Authors
diff --git a/bson/src/main/org/bson/assertions/Assertions.java b/bson/src/main/org/bson/assertions/Assertions.java
index 31b81a45f50..414c318122d 100644
--- a/bson/src/main/org/bson/assertions/Assertions.java
+++ b/bson/src/main/org/bson/assertions/Assertions.java
@@ -116,6 +116,19 @@ public static <T> T assertNotNull(@Nullable final T value) throws AssertionError
         return value;
     }
 
+    /**
+     * Throw AssertionError if the condition is false.
+     *
+     * @param name      the name of the state that is being checked
+     * @param condition the condition about the parameter to check
+     * @throws AssertionError if the condition is false
+     */
+    public static void assertTrue(final String name, final boolean condition) {
+        if (!condition) {
+            throw new AssertionError("state should be: " + assertNotNull(name));
+        }
+    }
+
     /**
      * Cast an object to the given class and return it, or throw IllegalArgumentException if it's not assignable to that class.
      *
diff --git a/build.gradle b/build.gradle
index 693b514b738..50623ee32bf 100644
--- a/build.gradle
+++ b/build.gradle
@@ -158,6 +158,7 @@ configure(scalaProjects) {
                     "-unchecked",
                     "-language:reflectiveCalls",
                     "-Wconf:cat=deprecation:ws,any:e",
+                    "-Wconf:msg=While parsing annotations in:silent",
                     "-Xlint:strict-unsealed-patmat"
             ]
         }
diff --git a/config/checkstyle/suppressions.xml b/config/checkstyle/suppressions.xml
index 49f3fb18e9e..6d24f861e08 100644
--- a/config/checkstyle/suppressions.xml
+++ b/config/checkstyle/suppressions.xml
@@ -29,6 +29,7 @@
     <suppress checks="MethodLength" files="QuickTour"/>
     <suppress checks="Regexp" files="Tour"/>
 
+    <suppress checks="FileLength" files="UnifiedCrudHelper"/>
     <suppress checks="MethodLength" files="PojoRoundTripTest"/>
     <suppress checks="MethodLength" files="AbstractUnifiedTest"/>
     <suppress checks="MethodLength" files="AbstractClientSideEncryptionTest"/>
@@ -87,6 +88,7 @@
     <suppress checks="ParameterNumber" files="Operations"/>
     <suppress checks="ParameterNumber" files="ChangeStreamDocument"/>
     <suppress checks="ParameterNumber" files="StructuredLogger"/>
+    <suppress checks="ParameterNumber" files="MongoClusterImpl"/>
 
     <!--Legacy code that has not yet been cleaned-->
     <suppress checks="FinalClass" files="AggregationOptions"/>
diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml
index 09af427f8d9..fedf0c72566 100644
--- a/config/spotbugs/exclude.xml
+++ b/config/spotbugs/exclude.xml
@@ -229,7 +229,7 @@
     -->
     <Match>
         <!-- MongoDB status: "False Positive", SpotBugs rank: 13 -->
-        <Class name="com.mongodb.kotlin.client.coroutine.MongoClient"/>
+        <Class name="com.mongodb.kotlin.client.coroutine.MongoCluster"/>
         <Method name="startSession"/>
         <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE"/>
     </Match>
@@ -239,4 +239,25 @@
         <Bug pattern="NP_NONNULL_PARAM_VIOLATION"/>
     </Match>
 
+    <!-- Ignoring await return; intended to be used in a loop -->
+    <Match>
+        <Class name="com.mongodb.internal.time.Timeout"/>
+        <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE"/>
+    </Match>
+    <Match>
+        <Class name="com.mongodb.internal.time.Timeout"/>
+        <Bug pattern="RV_RETURN_VALUE_IGNORED"/>
+    </Match>
+    <Match>
+        <Class name="com.mongodb.internal.time.Timeout"/>
+        <Bug pattern="WA_AWAIT_NOT_IN_LOOP"/>
+    </Match>
+
+    <!-- Void method returning null but @NotNull API -->
+    <Match>
+        <Class name="com.mongodb.internal.operation.DropIndexOperation"/>
+        <Method name="execute"/>
+        <Bug pattern="NP_NONNULL_RETURN_VIOLATION"/>
+    </Match>
+
 </FindBugsFilter>
diff --git a/driver-core/build.gradle b/driver-core/build.gradle
index 40a63c15d49..1f7d06f93f2 100644
--- a/driver-core/build.gradle
+++ b/driver-core/build.gradle
@@ -58,6 +58,7 @@ dependencies {
     implementation "org.mongodb:mongodb-crypt:$mongoCryptVersion", optional
 
     testImplementation project(':bson').sourceSets.test.output
+    testImplementation('org.junit.jupiter:junit-jupiter-api')
     testRuntimeOnly "io.netty:netty-tcnative-boringssl-static"
 
     classifiers.forEach {
diff --git a/driver-core/src/main/com/mongodb/AwsCredential.java b/driver-core/src/main/com/mongodb/AwsCredential.java
index dfd6c86776c..2fd6f8fb6f4 100644
--- a/driver-core/src/main/com/mongodb/AwsCredential.java
+++ b/driver-core/src/main/com/mongodb/AwsCredential.java
@@ -17,6 +17,7 @@
 package com.mongodb;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 
 import static com.mongodb.assertions.Assertions.notNull;
@@ -28,7 +29,7 @@
  * @see MongoCredential#AWS_CREDENTIAL_PROVIDER_KEY
  * @since 4.4
  */
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public final class AwsCredential {
     private final String accessKeyId;
     private final String secretAccessKey;
diff --git a/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
index 2df4b3363d4..ee9b88817e7 100644
--- a/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
+++ b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
@@ -16,15 +16,21 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.annotations.Reason;
+import com.mongodb.lang.Nullable;
 
 import javax.net.ssl.SSLContext;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout;
 import static java.util.Collections.unmodifiableMap;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * The client-side settings for data key creation and explicit encryption.
@@ -42,6 +48,8 @@ public final class ClientEncryptionSettings {
     private final Map<String, Map<String, Object>> kmsProviders;
     private final Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers;
     private final Map<String, SSLContext> kmsProviderSslContextMap;
+    @Nullable
+    private final Long timeoutMS;
     /**
      * A builder for {@code ClientEncryptionSettings} so that {@code ClientEncryptionSettings} can be immutable, and to support easier
      * construction through chaining.
@@ -53,6 +61,8 @@ public static final class Builder {
         private Map<String, Map<String, Object>> kmsProviders;
         private Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers = new HashMap<>();
         private Map<String, SSLContext> kmsProviderSslContextMap = new HashMap<>();
+        @Nullable
+        private Long timeoutMS;
 
         /**
          * Sets the {@link MongoClientSettings} that will be used to access the key vault.
@@ -120,6 +130,43 @@ public Builder kmsProviderSslContextMap(final Map<String, SSLContext> kmsProvide
             return this;
         }
 
+        /**
+         * Sets the time limit for the full execution of an operation.
+         *
+         * <ul>
+         *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+         *    <ul>
+         *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+         *        available</li>
+         *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+         *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+         *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+         *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+         *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+         *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+         *   </ul>
+         *   </li>
+         *   <li>{@code 0} means infinite timeout.</li>
+         *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+         * </ul>
+         *
+         * <p><strong>Note:</strong> The timeout set through this method overrides the timeout defined in the key vault client settings
+         * specified in {@link #keyVaultMongoClientSettings(MongoClientSettings)}.
+         * Essentially, for operations that require accessing the key vault, the remaining timeout from the initial operation
+         * determines the duration allowed for key vault access.</p>
+         *
+         * @param timeout the timeout
+         * @param timeUnit the time unit
+         * @return this
+         * @since 5.2
+         * @see #getTimeout
+         */
+        @Alpha(Reason.CLIENT)
+        public ClientEncryptionSettings.Builder timeout(final long timeout, final TimeUnit timeUnit) {
+            this.timeoutMS = convertAndValidateTimeout(timeout, timeUnit);
+            return this;
+        }
+
         /**
          * Build an instance of {@code ClientEncryptionSettings}.
          *
@@ -253,12 +300,46 @@ public Map<String, SSLContext> getKmsProviderSslContextMap() {
         return unmodifiableMap(kmsProviderSslContextMap);
     }
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return timeoutMS == null ? null : timeUnit.convert(timeoutMS, MILLISECONDS);
+    }
+
     private ClientEncryptionSettings(final Builder builder) {
         this.keyVaultMongoClientSettings = notNull("keyVaultMongoClientSettings", builder.keyVaultMongoClientSettings);
         this.keyVaultNamespace = notNull("keyVaultNamespace", builder.keyVaultNamespace);
         this.kmsProviders = notNull("kmsProviders", builder.kmsProviders);
         this.kmsProviderPropertySuppliers = notNull("kmsProviderPropertySuppliers", builder.kmsProviderPropertySuppliers);
         this.kmsProviderSslContextMap = notNull("kmsProviderSslContextMap", builder.kmsProviderSslContextMap);
+        this.timeoutMS = builder.timeoutMS;
     }
 
 }
diff --git a/driver-core/src/main/com/mongodb/ClientSessionOptions.java b/driver-core/src/main/com/mongodb/ClientSessionOptions.java
index 7a272016006..160d16c3486 100644
--- a/driver-core/src/main/com/mongodb/ClientSessionOptions.java
+++ b/driver-core/src/main/com/mongodb/ClientSessionOptions.java
@@ -16,14 +16,19 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.Immutable;
 import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 import com.mongodb.session.ClientSession;
 
 import java.util.Objects;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * The options to apply to a {@code ClientSession}.
@@ -38,6 +43,7 @@ public final class ClientSessionOptions {
 
     private final Boolean causallyConsistent;
     private final Boolean snapshot;
+    private final Long defaultTimeoutMS;
     private final TransactionOptions defaultTransactionOptions;
 
     /**
@@ -77,6 +83,25 @@ public TransactionOptions getDefaultTransactionOptions() {
         return defaultTransactionOptions;
     }
 
+    /**
+     * Gets the default time limit for the following operations executed on the session:
+     *
+     * <ul>
+     *   <li>{@code commitTransaction}</li>
+     *   <li>{@code abortTransaction}</li>
+     *   <li>{@code withTransaction}</li>
+     *   <li>{@code close}</li>
+     * </ul>
+     * @param timeUnit the time unit
+     * @return the default timeout
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getDefaultTimeout(final TimeUnit timeUnit) {
+        return defaultTimeoutMS == null ? null : timeUnit.convert(defaultTimeoutMS, MILLISECONDS);
+    }
+
     @Override
     public boolean equals(final Object o) {
         if (this == o) {
@@ -85,36 +110,24 @@ public boolean equals(final Object o) {
         if (o == null || getClass() != o.getClass()) {
             return false;
         }
-
-        ClientSessionOptions that = (ClientSessionOptions) o;
-
-        if (!Objects.equals(causallyConsistent, that.causallyConsistent)) {
-            return false;
-        }
-
-        if (!Objects.equals(snapshot, that.snapshot)) {
-            return false;
-        }
-        if (!Objects.equals(defaultTransactionOptions, that.defaultTransactionOptions)) {
-            return false;
-        }
-
-        return true;
+        final ClientSessionOptions that = (ClientSessionOptions) o;
+        return Objects.equals(causallyConsistent, that.causallyConsistent)
+                && Objects.equals(snapshot, that.snapshot)
+                && Objects.equals(defaultTimeoutMS, that.defaultTimeoutMS)
+                && Objects.equals(defaultTransactionOptions, that.defaultTransactionOptions);
     }
 
     @Override
     public int hashCode() {
-        int result = causallyConsistent != null ? causallyConsistent.hashCode() : 0;
-        result = 31 * result + (snapshot != null ? snapshot.hashCode() : 0);
-        result = 31 * result + (defaultTransactionOptions != null ? defaultTransactionOptions.hashCode() : 0);
-        return result;
+        return Objects.hash(causallyConsistent, snapshot, defaultTimeoutMS, defaultTransactionOptions);
     }
 
     @Override
     public String toString() {
         return "ClientSessionOptions{"
                 + "causallyConsistent=" + causallyConsistent
-                + "snapshot=" + snapshot
+                + ", snapshot=" + snapshot
+                + ", defaultTimeoutMS=" + defaultTimeoutMS
                 + ", defaultTransactionOptions=" + defaultTransactionOptions
                 + '}';
     }
@@ -141,6 +154,7 @@ public static Builder builder(final ClientSessionOptions options) {
         builder.causallyConsistent = options.isCausallyConsistent();
         builder.snapshot = options.isSnapshot();
         builder.defaultTransactionOptions = options.getDefaultTransactionOptions();
+        builder.defaultTimeoutMS = options.defaultTimeoutMS;
         return builder;
     }
 
@@ -151,6 +165,7 @@ public static Builder builder(final ClientSessionOptions options) {
     public static final class Builder {
         private Boolean causallyConsistent;
         private Boolean snapshot;
+        private Long defaultTimeoutMS;
         private TransactionOptions defaultTransactionOptions = TransactionOptions.builder().build();
 
         /**
@@ -196,6 +211,27 @@ public Builder defaultTransactionOptions(final TransactionOptions defaultTransac
             return this;
         }
 
+        /**
+         * Sets the default time limit for the following operations executed on the session:
+         *
+         * <ul>
+         *   <li>{@code commitTransaction}</li>
+         *   <li>{@code abortTransaction}</li>
+         *   <li>{@code withTransaction}</li>
+         *   <li>{@code close}</li>
+         * </ul>
+         * @param defaultTimeout the timeout
+         * @param timeUnit the time unit
+         * @return this
+         * @since 5.2
+         * @see #getDefaultTimeout
+         */
+        @Alpha(Reason.CLIENT)
+        public Builder defaultTimeout(final long defaultTimeout, final TimeUnit timeUnit) {
+            this.defaultTimeoutMS = convertAndValidateTimeout(defaultTimeout, timeUnit, "defaultTimeout");
+            return this;
+        }
+
         /**
          * Build the session options instance.
          *
@@ -218,5 +254,6 @@ private ClientSessionOptions(final Builder builder) {
                 : Boolean.valueOf(!builder.snapshot);
         this.snapshot = builder.snapshot;
         this.defaultTransactionOptions = builder.defaultTransactionOptions;
+        this.defaultTimeoutMS = builder.defaultTimeoutMS;
     }
 }
diff --git a/driver-core/src/main/com/mongodb/ConnectionString.java b/driver-core/src/main/com/mongodb/ConnectionString.java
index 17a990ea127..f779ab7290d 100644
--- a/driver-core/src/main/com/mongodb/ConnectionString.java
+++ b/driver-core/src/main/com/mongodb/ConnectionString.java
@@ -16,6 +16,8 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import com.mongodb.connection.ClusterSettings;
 import com.mongodb.connection.ConnectionPoolSettings;
 import com.mongodb.connection.ServerMonitoringMode;
@@ -139,9 +141,12 @@
  * <li>{@code sslInvalidHostNameAllowed=true|false}: Whether to allow invalid host names for TLS connections.</li>
  * <li>{@code tlsAllowInvalidHostnames=true|false}: Whether to allow invalid host names for TLS connections. Supersedes the
  * sslInvalidHostNameAllowed option</li>
+ * <li>{@code timeoutMS=ms}: Time limit for the full execution of an operation. Note: This parameter is part of an {@linkplain Alpha Alpha API} and may be
+ * subject to changes or even removal in future releases.</li>
  * <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
  * <li>{@code socketTimeoutMS=ms}: How long a receive on a socket can take before timing out.
- * This option is the same as {@link SocketSettings#getReadTimeout(TimeUnit)}.</li>
+ * This option is the same as {@link SocketSettings#getReadTimeout(TimeUnit)}.
+ * Deprecated, use {@code timeoutMS} instead.</li>
  * <li>{@code maxIdleTimeMS=ms}: Maximum idle time of a pooled connection. A connection that exceeds this limit will be closed</li>
  * <li>{@code maxLifeTimeMS=ms}: Maximum life time of a pooled connection. A connection that exceeds this limit will be closed</li>
  * </ul>
@@ -161,7 +166,7 @@
  * <li>{@code waitQueueTimeoutMS=ms}: The maximum duration to wait until either:
  * an {@linkplain ConnectionCheckedOutEvent in-use connection} becomes {@linkplain ConnectionCheckedInEvent available},
  * or a {@linkplain ConnectionCreatedEvent connection is created} and begins to be {@linkplain ConnectionReadyEvent established}.
- * See {@link #getMaxWaitTime()} for more details.</li>
+ * See {@link #getMaxWaitTime()} for more details. Deprecated, use {@code timeoutMS} instead.</li>
  * <li>{@code maxConnecting=n}: The maximum number of connections a pool may be establishing concurrently.</li>
  * </ul>
  * <p>Write concern configuration:</p>
@@ -189,7 +194,7 @@
  * <li>{@code wtimeoutMS=ms}
  * <ul>
  * <li>The driver adds { wtimeout : ms } to all write commands. Implies {@code safe=true}.</li>
- * <li>Used in combination with {@code w}</li>
+ * <li>Used in combination with {@code w}. Deprecated, use {@code timeoutMS} instead.</li>
  * </ul>
  * </li>
  * </ul>
@@ -311,6 +316,7 @@ public class ConnectionString {
     private Integer maxConnectionLifeTime;
     private Integer maxConnecting;
     private Integer connectTimeout;
+    private Long timeout;
     private Integer socketTimeout;
     private Boolean sslEnabled;
     private Boolean sslInvalidHostnameAllowed;
@@ -503,6 +509,7 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient
 
         credential = createCredentials(combinedOptionsMaps, userName, password);
         warnOnUnsupportedOptions(combinedOptionsMaps);
+        warnDeprecatedTimeouts(combinedOptionsMaps);
     }
 
     private static final Set<String> GENERAL_OPTIONS_KEYS = new LinkedHashSet<>();
@@ -511,16 +518,18 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient
     private static final Set<String> WRITE_CONCERN_KEYS = new HashSet<>();
     private static final Set<String> COMPRESSOR_KEYS = new HashSet<>();
     private static final Set<String> ALL_KEYS = new HashSet<>();
+    private static final Set<String> DEPRECATED_TIMEOUT_KEYS = new HashSet<>();
 
     static {
         GENERAL_OPTIONS_KEYS.add("minpoolsize");
         GENERAL_OPTIONS_KEYS.add("maxpoolsize");
+        GENERAL_OPTIONS_KEYS.add("timeoutms");
+        GENERAL_OPTIONS_KEYS.add("sockettimeoutms");
         GENERAL_OPTIONS_KEYS.add("waitqueuetimeoutms");
         GENERAL_OPTIONS_KEYS.add("connecttimeoutms");
         GENERAL_OPTIONS_KEYS.add("maxidletimems");
         GENERAL_OPTIONS_KEYS.add("maxlifetimems");
         GENERAL_OPTIONS_KEYS.add("maxconnecting");
-        GENERAL_OPTIONS_KEYS.add("sockettimeoutms");
 
         // Order matters here: Having tls after ssl means than the tls option will supersede the ssl option when both are set
         GENERAL_OPTIONS_KEYS.add("ssl");
@@ -583,6 +592,10 @@ public ConnectionString(final String connectionString, @Nullable final DnsClient
         ALL_KEYS.addAll(READ_PREFERENCE_KEYS);
         ALL_KEYS.addAll(WRITE_CONCERN_KEYS);
         ALL_KEYS.addAll(COMPRESSOR_KEYS);
+
+        DEPRECATED_TIMEOUT_KEYS.add("sockettimeoutms");
+        DEPRECATED_TIMEOUT_KEYS.add("waitqueuetimeoutms");
+        DEPRECATED_TIMEOUT_KEYS.add("wtimeoutms");
     }
 
     // Any options contained in the connection string completely replace the corresponding options specified in TXT records,
@@ -596,15 +609,23 @@ private Map<String, List<String>> combineOptionsMaps(final Map<String, List<Stri
 
 
     private void warnOnUnsupportedOptions(final Map<String, List<String>> optionsMap) {
-        for (final String key : optionsMap.keySet()) {
-            if (!ALL_KEYS.contains(key)) {
-                if (LOGGER.isWarnEnabled()) {
-                    LOGGER.warn(format("Connection string contains unsupported option '%s'.", key));
-                }
-            }
+        if (LOGGER.isWarnEnabled()) {
+            optionsMap.keySet()
+                    .stream()
+                    .filter(k -> !ALL_KEYS.contains(k))
+                    .forEach(k -> LOGGER.warn(format("Connection string contains unsupported option '%s'.", k)));
+        }
+    }
+    private void warnDeprecatedTimeouts(final Map<String, List<String>> optionsMap) {
+        if (LOGGER.isWarnEnabled()) {
+            optionsMap.keySet()
+                    .stream()
+                    .filter(DEPRECATED_TIMEOUT_KEYS::contains)
+                    .forEach(k -> LOGGER.warn(format("Use of deprecated timeout option: '%s'. Prefer 'timeoutMS' instead.", k)));
         }
     }
 
+
     private void translateOptions(final Map<String, List<String>> optionsMap) {
         boolean tlsInsecureSet = false;
         boolean tlsAllowInvalidHostnamesSet = false;
@@ -639,6 +660,9 @@ private void translateOptions(final Map<String, List<String>> optionsMap) {
                 case "sockettimeoutms":
                     socketTimeout = parseInteger(value, "sockettimeoutms");
                     break;
+                case "timeoutms":
+                    timeout = parseLong(value, "timeoutms");
+                    break;
                 case "proxyhost":
                     proxyHost = value;
                     break;
@@ -1159,6 +1183,15 @@ private int parseInteger(final String input, final String key) {
         }
     }
 
+    private long parseLong(final String input, final String key) {
+        try {
+            return Long.parseLong(input);
+        } catch (NumberFormatException e) {
+            throw new IllegalArgumentException(format("The connection string contains an invalid value for '%s'. "
+                    + "'%s' is not a valid long", key, input));
+        }
+    }
+
     private List<String> parseHosts(final List<String> rawHosts) {
         if (rawHosts.size() == 0){
             throw new IllegalArgumentException("The connection string must contain at least one host");
@@ -1533,6 +1566,38 @@ public Integer getMaxConnecting() {
         return maxConnecting;
     }
 
+    /**
+     * The time limit for the full execution of an operation in milliseconds.
+     *
+     * <p>If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @return the time limit for the full execution of an operation in milliseconds or null.
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getTimeout() {
+        return timeout;
+    }
+
     /**
      * Gets the socket connect timeout specified in the connection string.
      * @return the socket connect timeout
@@ -1737,6 +1802,7 @@ public boolean equals(final Object o) {
                 && Objects.equals(maxConnectionLifeTime, that.maxConnectionLifeTime)
                 && Objects.equals(maxConnecting, that.maxConnecting)
                 && Objects.equals(connectTimeout, that.connectTimeout)
+                && Objects.equals(timeout, that.timeout)
                 && Objects.equals(socketTimeout, that.socketTimeout)
                 && Objects.equals(proxyHost, that.proxyHost)
                 && Objects.equals(proxyPort, that.proxyPort)
@@ -1760,7 +1826,7 @@ public boolean equals(final Object o) {
     public int hashCode() {
         return Objects.hash(credential, isSrvProtocol, hosts, database, collection, directConnection, readPreference,
                 writeConcern, retryWrites, retryReads, readConcern, minConnectionPoolSize, maxConnectionPoolSize, maxWaitTime,
-                maxConnectionIdleTime, maxConnectionLifeTime, maxConnecting, connectTimeout, socketTimeout, sslEnabled,
+                maxConnectionIdleTime, maxConnectionLifeTime, maxConnecting, connectTimeout, timeout, socketTimeout, sslEnabled,
                 sslInvalidHostnameAllowed, requiredReplicaSetName, serverSelectionTimeout, localThreshold, heartbeatFrequency,
                 serverMonitoringMode, applicationName, compressorList, uuidRepresentation, srvServiceName, srvMaxHosts, proxyHost,
                 proxyPort, proxyUsername, proxyPassword);
diff --git a/driver-core/src/main/com/mongodb/MongoClientSettings.java b/driver-core/src/main/com/mongodb/MongoClientSettings.java
index 0d98bbe33d3..31206e56029 100644
--- a/driver-core/src/main/com/mongodb/MongoClientSettings.java
+++ b/driver-core/src/main/com/mongodb/MongoClientSettings.java
@@ -16,8 +16,10 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.Immutable;
 import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.gridfs.codecs.GridFSFileCodecProvider;
 import com.mongodb.client.model.geojson.codecs.GeoJsonCodecProvider;
 import com.mongodb.client.model.mql.ExpressionCodecProvider;
@@ -49,9 +51,12 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
+import java.util.concurrent.TimeUnit;
 
+import static com.mongodb.assertions.Assertions.isTrue;
 import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout;
 import static java.util.Arrays.asList;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
@@ -111,6 +116,8 @@ public final class MongoClientSettings {
     private final ContextProvider contextProvider;
     private final DnsClient dnsClient;
     private final InetAddressResolver inetAddressResolver;
+    @Nullable
+    private final Long timeoutMS;
 
     /**
      * Gets the default codec registry.  It includes the following providers:
@@ -226,6 +233,7 @@ public static final class Builder {
 
         private int heartbeatConnectTimeoutMS;
         private int heartbeatSocketTimeoutMS;
+        private Long timeoutMS;
 
         private ContextProvider contextProvider;
         private DnsClient dnsClient;
@@ -249,6 +257,7 @@ private Builder(final MongoClientSettings settings) {
             uuidRepresentation = settings.getUuidRepresentation();
             serverApi = settings.getServerApi();
             dnsClient = settings.getDnsClient();
+            timeoutMS = settings.getTimeout(MILLISECONDS);
             inetAddressResolver = settings.getInetAddressResolver();
             transportSettings = settings.getTransportSettings();
             autoEncryptionSettings = settings.getAutoEncryptionSettings();
@@ -311,6 +320,9 @@ public Builder applyConnectionString(final ConnectionString connectionString) {
             if (connectionString.getWriteConcern() != null) {
                 writeConcern = connectionString.getWriteConcern();
             }
+            if (connectionString.getTimeout() != null) {
+                timeoutMS = connectionString.getTimeout();
+            }
             return this;
         }
 
@@ -666,6 +678,39 @@ public Builder inetAddressResolver(@Nullable final InetAddressResolver inetAddre
             return this;
         }
 
+
+        /**
+         * Sets the time limit for the full execution of an operation.
+         *
+         * <ul>
+         *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+         *    <ul>
+         *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+         *        available</li>
+         *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+         *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+         *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+         *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+         *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+         *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+         *   </ul>
+         *   </li>
+         *   <li>{@code 0} means infinite timeout.</li>
+         *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+         * </ul>
+         *
+         * @param timeout the timeout
+         * @param timeUnit the time unit
+         * @return this
+         * @since 5.2
+         * @see #getTimeout
+         */
+        @Alpha(Reason.CLIENT)
+        public Builder timeout(final long timeout, final TimeUnit timeUnit) {
+            this.timeoutMS = convertAndValidateTimeout(timeout, timeUnit);
+            return this;
+        }
+
         // Package-private to provide interop with MongoClientOptions
         Builder heartbeatConnectTimeoutMS(final int heartbeatConnectTimeoutMS) {
             this.heartbeatConnectTimeoutMS = heartbeatConnectTimeoutMS;
@@ -846,6 +891,39 @@ public ServerApi getServerApi() {
         return serverApi;
     }
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return timeoutMS == null ? null : timeUnit.convert(timeoutMS, MILLISECONDS);
+    }
+
     /**
      * Gets the auto-encryption settings.
      * <p>
@@ -996,7 +1074,8 @@ public boolean equals(final Object o) {
                 && Objects.equals(autoEncryptionSettings, that.autoEncryptionSettings)
                 && Objects.equals(dnsClient, that.dnsClient)
                 && Objects.equals(inetAddressResolver, that.inetAddressResolver)
-                && Objects.equals(contextProvider, that.contextProvider);
+                && Objects.equals(contextProvider, that.contextProvider)
+                && Objects.equals(timeoutMS, that.timeoutMS);
     }
 
     @Override
@@ -1005,7 +1084,8 @@ public int hashCode() {
                 commandListeners, codecRegistry, loggerSettings, clusterSettings, socketSettings,
                 heartbeatSocketSettings, connectionPoolSettings, serverSettings, sslSettings, applicationName, compressorList,
                 uuidRepresentation, serverApi, autoEncryptionSettings, heartbeatSocketTimeoutSetExplicitly,
-                heartbeatConnectTimeoutSetExplicitly, dnsClient, inetAddressResolver, contextProvider);
+                heartbeatConnectTimeoutSetExplicitly, dnsClient, inetAddressResolver, contextProvider, timeoutMS);
+
     }
 
     @Override
@@ -1035,10 +1115,12 @@ public String toString() {
                 + ", dnsClient=" + dnsClient
                 + ", inetAddressResolver=" + inetAddressResolver
                 + ", contextProvider=" + contextProvider
+                + ", timeoutMS=" + timeoutMS
                 + '}';
     }
 
     private MongoClientSettings(final Builder builder) {
+        isTrue("timeoutMS >= 0", builder.timeoutMS == null || builder.timeoutMS >= 0);
         readPreference = builder.readPreference;
         writeConcern = builder.writeConcern;
         retryWrites = builder.retryWrites;
@@ -1073,5 +1155,6 @@ private MongoClientSettings(final Builder builder) {
         heartbeatSocketTimeoutSetExplicitly = builder.heartbeatSocketTimeoutMS != 0;
         heartbeatConnectTimeoutSetExplicitly = builder.heartbeatConnectTimeoutMS != 0;
         contextProvider = builder.contextProvider;
+        timeoutMS = builder.timeoutMS;
     }
 }
diff --git a/driver-core/src/main/com/mongodb/MongoCredential.java b/driver-core/src/main/com/mongodb/MongoCredential.java
index 8f731027cf4..f55251a7603 100644
--- a/driver-core/src/main/com/mongodb/MongoCredential.java
+++ b/driver-core/src/main/com/mongodb/MongoCredential.java
@@ -19,6 +19,7 @@
 import com.mongodb.annotations.Beta;
 import com.mongodb.annotations.Evolving;
 import com.mongodb.annotations.Immutable;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 
 import java.time.Duration;
@@ -182,7 +183,7 @@ public final class MongoCredential {
      * @see AwsCredential
      * @since 4.4
      */
-    @Beta(Beta.Reason.CLIENT)
+    @Beta(Reason.CLIENT)
     public static final String AWS_CREDENTIAL_PROVIDER_KEY = "AWS_CREDENTIAL_PROVIDER";
 
     /**
diff --git a/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java b/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java
index a48328b5ca9..e257991ccda 100644
--- a/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java
+++ b/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java
@@ -16,6 +16,8 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import org.bson.BsonDocument;
 
 /**
@@ -26,6 +28,18 @@
 public class MongoExecutionTimeoutException extends MongoException {
     private static final long serialVersionUID = 5955669123800274594L;
 
+    /**
+     * Construct a new instance.
+     *
+     * @param message the error message
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public MongoExecutionTimeoutException(final String message) {
+        super(message);
+
+    }
+
     /**
      * Construct a new instance.
      *
diff --git a/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java b/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java
new file mode 100644
index 00000000000..707df3e7b73
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Exception thrown to indicate that a MongoDB operation has exceeded the specified timeout for
+ * the full execution of the operation.
+ *
+ * <p>The {@code MongoOperationTimeoutException} might provide information about the underlying
+ * cause of the timeout, if available. For example, if retries are attempted due to transient failures,
+ * and a timeout occurs in any of the attempts, the exception from one of the retries may be appended
+ * as the cause to this {@code MongoOperationTimeoutException}.
+ *
+ * <p>The key difference between {@code MongoOperationTimeoutException} and {@code MongoExecutionTimeoutException}
+ * lies in the nature of these exceptions. {@code MongoExecutionTimeoutException} indicates a server-side timeout
+ * capped by a user-specified number. These server errors are transformed into the new {@code MongoOperationTimeoutException}.
+ * On the other hand, {@code MongoOperationTimeoutException} denotes a timeout during the execution of the entire operation.
+ *
+ * @see MongoClientSettings.Builder#timeout(long, TimeUnit)
+ * @see MongoClientSettings#getTimeout(TimeUnit)
+ * @since 5.2
+ */
+@Alpha(Reason.CLIENT)
+public final class MongoOperationTimeoutException extends MongoTimeoutException {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Construct a new instance.
+     *
+     * @param message the message
+     */
+    public MongoOperationTimeoutException(final String message) {
+        super(message);
+    }
+
+    /**
+     * Construct a new instance
+     * @param message the message
+     * @param cause the cause
+     */
+    public MongoOperationTimeoutException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java b/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java
new file mode 100644
index 00000000000..bd95430e595
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+
+/**
+ * This exception is thrown when there is a timeout writing a message to the socket.
+ *
+ * @since 5.2
+ */
+@Alpha(Reason.CLIENT)
+public class MongoSocketWriteTimeoutException extends MongoSocketException {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Construct a new instance
+     *
+     * @param message the message
+     * @param address the address
+     * @param cause the cause
+     */
+    public MongoSocketWriteTimeoutException(final String message, final ServerAddress address, final Throwable cause) {
+        super(message, address, cause);
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/MongoTimeoutException.java b/driver-core/src/main/com/mongodb/MongoTimeoutException.java
index ff9623b09f0..e2cce02403a 100644
--- a/driver-core/src/main/com/mongodb/MongoTimeoutException.java
+++ b/driver-core/src/main/com/mongodb/MongoTimeoutException.java
@@ -16,6 +16,9 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+
 /**
  * An exception indicating that the driver has timed out waiting for either a server or a connection to become available.
  */
@@ -31,4 +34,15 @@ public class MongoTimeoutException extends MongoClientException {
     public MongoTimeoutException(final String message) {
         super(message);
     }
+
+    /**
+     * Construct a new instance
+     * @param message the message
+     * @param cause the cause
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public MongoTimeoutException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
 }
diff --git a/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java
index 1db6b4eba07..c91a3c87fc5 100644
--- a/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java
+++ b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java
@@ -16,6 +16,7 @@
 package com.mongodb;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import org.bson.BsonDocument;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
@@ -26,7 +27,7 @@
  *
  * @since 4.9
  */
-@Beta(Beta.Reason.SERVER)
+@Beta(Reason.SERVER)
 public final class MongoUpdatedEncryptedFieldsException extends MongoClientException {
     private static final long serialVersionUID = 1;
 
diff --git a/driver-core/src/main/com/mongodb/TransactionOptions.java b/driver-core/src/main/com/mongodb/TransactionOptions.java
index e4cafe9161c..e5f22c22def 100644
--- a/driver-core/src/main/com/mongodb/TransactionOptions.java
+++ b/driver-core/src/main/com/mongodb/TransactionOptions.java
@@ -16,7 +16,9 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.Immutable;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 
 import java.util.Objects;
@@ -24,6 +26,7 @@
 
 import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeoutNullable;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
@@ -42,6 +45,7 @@ public final class TransactionOptions {
     private final WriteConcern writeConcern;
     private final ReadPreference readPreference;
     private final Long maxCommitTimeMS;
+    private final Long timeoutMS;
 
     /**
      * Gets the read concern.
@@ -91,6 +95,34 @@ public Long getMaxCommitTime(final TimeUnit timeUnit) {
         return timeUnit.convert(maxCommitTimeMS, MILLISECONDS);
     }
 
+    /**
+     * The time limit for the full execution of the transaction.
+     *
+     * <p>If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using
+     *   {@link ClientSessionOptions#getDefaultTimeout(TimeUnit)} or {@link MongoClientSettings#getTimeout(TimeUnit)}
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Nullable
+    @Alpha(Reason.CLIENT)
+    public Long getTimeout(final TimeUnit timeUnit) {
+        notNull("timeUnit", timeUnit);
+        if (timeoutMS == null) {
+            return null;
+        }
+        return timeUnit.convert(timeoutMS, MILLISECONDS);
+    }
+
     /**
      * Gets an instance of a builder
      *
@@ -120,6 +152,9 @@ public static TransactionOptions merge(final TransactionOptions options, final T
                 .maxCommitTime(options.getMaxCommitTime(MILLISECONDS) == null
                                 ? defaultOptions.getMaxCommitTime(MILLISECONDS) : options.getMaxCommitTime(MILLISECONDS),
                         MILLISECONDS)
+                .timeout(options.getTimeout(MILLISECONDS) == null
+                                ? defaultOptions.getTimeout(MILLISECONDS) : options.getTimeout(MILLISECONDS),
+                        MILLISECONDS)
                 .build();
     }
 
@@ -134,6 +169,9 @@ public boolean equals(final Object o) {
 
         TransactionOptions that = (TransactionOptions) o;
 
+        if (!Objects.equals(timeoutMS, that.timeoutMS)) {
+            return false;
+        }
         if (!Objects.equals(maxCommitTimeMS, that.maxCommitTimeMS)) {
             return false;
         }
@@ -156,6 +194,7 @@ public int hashCode() {
         result = 31 * result + (writeConcern != null ? writeConcern.hashCode() : 0);
         result = 31 * result + (readPreference != null ? readPreference.hashCode() : 0);
         result = 31 * result + (maxCommitTimeMS != null ? maxCommitTimeMS.hashCode() : 0);
+        result = 31 * result + (timeoutMS != null ? timeoutMS.hashCode() : 0);
         return result;
     }
 
@@ -165,7 +204,8 @@ public String toString() {
                 + "readConcern=" + readConcern
                 + ", writeConcern=" + writeConcern
                 + ", readPreference=" + readPreference
-                + ", maxCommitTimeMS" + maxCommitTimeMS
+                + ", maxCommitTimeMS=" + maxCommitTimeMS
+                + ", timeoutMS=" + timeoutMS
                 + '}';
     }
 
@@ -177,6 +217,8 @@ public static final class Builder {
         private WriteConcern writeConcern;
         private ReadPreference readPreference;
         private Long maxCommitTimeMS;
+        @Nullable
+        private Long timeoutMS;
 
         /**
          * Sets the read concern.
@@ -231,6 +273,36 @@ public Builder maxCommitTime(@Nullable final Long maxCommitTime, final TimeUnit
             return this;
         }
 
+        /**
+         * Sets the time limit for the full execution of the operations for this transaction.
+         *
+         * <ul>
+         *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+         *    <ul>
+         *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+         *        available</li>
+         *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+         *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+         *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+         *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+         *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.</li>
+         *   </ul>
+         *   </li>
+         *   <li>{@code 0} means infinite timeout.</li>
+         *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+         * </ul>
+         *
+         * @param timeout the timeout
+         * @param timeUnit the time unit
+         * @return this
+         * @since 5.2
+         */
+        @Alpha(Reason.CLIENT)
+        public Builder timeout(@Nullable final Long timeout, final TimeUnit timeUnit) {
+            this.timeoutMS = convertAndValidateTimeoutNullable(timeout, timeUnit);
+            return this;
+        }
+
         /**
          * Build the transaction options instance.
          *
@@ -250,5 +322,6 @@ private TransactionOptions(final Builder builder) {
         writeConcern = builder.writeConcern;
         readPreference = builder.readPreference;
         maxCommitTimeMS = builder.maxCommitTimeMS;
+        timeoutMS = builder.timeoutMS;
     }
 }
diff --git a/driver-core/src/main/com/mongodb/annotations/Alpha.java b/driver-core/src/main/com/mongodb/annotations/Alpha.java
new file mode 100644
index 00000000000..3698c7ac860
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/annotations/Alpha.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright 2010 The Guava Authors
+ * Copyright 2011 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.annotations;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Signifies that a public API element is in the early stages of development, subject to
+ * incompatible changes, or even removal, in a future release and may lack some intended features.
+ * An API bearing this annotation may contain known issues affecting functionality, performance,
+ * and stability. It is also exempt from any compatibility guarantees made by its containing library.
+ *
+ * <p>It is inadvisable for <i>applications</i> to use Alpha APIs in production environments or
+ * for <i>libraries</i> (which get included on users' CLASSPATHs, outside the library developers'
+ * control) to depend on these APIs. Alpha APIs are intended for <b>experimental purposes</b> only.</p>
+ */
+@Retention(RetentionPolicy.CLASS)
+@Target({
+        ElementType.ANNOTATION_TYPE,
+        ElementType.CONSTRUCTOR,
+        ElementType.FIELD,
+        ElementType.METHOD,
+        ElementType.PACKAGE,
+        ElementType.TYPE })
+@Documented
+@Beta(Reason.CLIENT)
+public @interface Alpha {
+    /**
+     * @return The reason an API element is marked with {@link Alpha}.
+     */
+    Reason[] value();
+}
diff --git a/driver-core/src/main/com/mongodb/annotations/Beta.java b/driver-core/src/main/com/mongodb/annotations/Beta.java
index a44dae43cd5..55753ddc051 100644
--- a/driver-core/src/main/com/mongodb/annotations/Beta.java
+++ b/driver-core/src/main/com/mongodb/annotations/Beta.java
@@ -47,25 +47,10 @@
         ElementType.PACKAGE,
         ElementType.TYPE })
 @Documented
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public @interface Beta {
     /**
      * @return The reason an API element is marked with {@link Beta}.
      */
     Reason[] value();
-
-    /**
-     * @see Beta#value()
-     */
-    enum Reason {
-        /**
-         * The driver API is in preview.
-         */
-        CLIENT,
-        /**
-         * The driver API relies on the server API, which is in preview.
-         * We still may decide to change the driver API even if the server API stays unchanged.
-         */
-        SERVER
-    }
 }
diff --git a/driver-core/src/main/com/mongodb/annotations/Reason.java b/driver-core/src/main/com/mongodb/annotations/Reason.java
new file mode 100644
index 00000000000..af72098a9de
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/annotations/Reason.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.annotations;
+
+/**
+ * Enumerates the reasons an API element might be marked with annotations like {@link Alpha} or {@link Beta}.
+ */
+@Beta(Reason.CLIENT)
+public enum Reason {
+    /**
+     * Indicates that the status of the driver API is the reason for the annotation.
+     */
+    CLIENT,
+
+    /**
+     * The driver API relies on the server API.
+     * This dependency is the reason for the annotation and suggests that changes in the server API could impact the driver API.
+     */
+    SERVER
+}
diff --git a/driver-core/src/main/com/mongodb/assertions/Assertions.java b/driver-core/src/main/com/mongodb/assertions/Assertions.java
index 9866c222c6d..a40b4e4b7b6 100644
--- a/driver-core/src/main/com/mongodb/assertions/Assertions.java
+++ b/driver-core/src/main/com/mongodb/assertions/Assertions.java
@@ -20,10 +20,11 @@
 import com.mongodb.lang.Nullable;
 
 import java.util.Collection;
+import java.util.function.Function;
 import java.util.function.Supplier;
 
 /**
- * <p>Design by contract assertions.</p> <p>This class is not part of the public API and may be removed or changed at any time.</p>
+ * <p>Design by contract assertions.</p>
  * All {@code assert...} methods throw {@link AssertionError} and should be used to check conditions which may be violated if and only if
  * the driver code is incorrect. The intended usage of this methods is the same as of the
  * <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/language/assert.html">Java {@code assert} statement</a>. The reason
@@ -104,6 +105,24 @@ public static void isTrueArgument(final String name, final boolean condition) {
         }
     }
 
+    /**
+     * Throw IllegalArgumentException if the condition returns false.
+     *
+     * @param msg the error message if the condition returns false
+     * @param supplier the supplier of the value
+     * @param condition the condition function
+     * @return the supplied value if it meets the condition
+     * @param <T> the type of the supplied value
+     */
+    public static <T> T isTrueArgument(final String msg, final Supplier<T> supplier, final Function<T, Boolean> condition) {
+        T value = doesNotThrow(supplier);
+        if (!condition.apply(value)) {
+            throw new IllegalArgumentException(msg);
+        }
+
+        return value;
+    }
+
     /**
      * Throw IllegalArgumentException if the collection contains a null value.
      *
diff --git a/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java b/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java
new file mode 100644
index 00000000000..cdaa92d4923
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client.cursor;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * The timeout mode for a cursor
+ *
+ * <p>For operations that create cursors, {@code timeoutMS} can either cap the lifetime of the cursor or be applied separately to the
+ * original operation and all next calls.
+ * </p>
+ * @see com.mongodb.MongoClientSettings#getTimeout(TimeUnit)
+ * @since 5.2
+ */
+@Alpha(Reason.CLIENT)
+public enum TimeoutMode {
+
+    /**
+     * The timeout lasts for the lifetime of the cursor
+     */
+    CURSOR_LIFETIME,
+
+    /**
+     * The timeout is reset for each batch iteration of the cursor
+     */
+    ITERATION
+}
diff --git a/driver-core/src/main/com/mongodb/client/cursor/package-info.java b/driver-core/src/main/com/mongodb/client/cursor/package-info.java
new file mode 100644
index 00000000000..ea907688087
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/cursor/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains models and options that help describe MongoCollection operations
+ */
+@NonNullApi
+package com.mongodb.client.cursor;
+
+import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/client/model/Aggregates.java b/driver-core/src/main/com/mongodb/client/model/Aggregates.java
index 08e2fb10b02..53e9e1eaf52 100644
--- a/driver-core/src/main/com/mongodb/client/model/Aggregates.java
+++ b/driver-core/src/main/com/mongodb/client/model/Aggregates.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.MongoNamespace;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.model.densify.DensifyOptions;
 import com.mongodb.client.model.densify.DensifyRange;
 import com.mongodb.client.model.fill.FillOptions;
@@ -955,7 +956,7 @@ public static Bson searchMeta(final SearchCollector collector, final SearchOptio
      * @mongodb.server.release 6.0.10
      * @since 4.11
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     public static Bson vectorSearch(
             final FieldSearchPath path,
             final Iterable<Double> queryVector,
@@ -984,7 +985,7 @@ public static Bson vectorSearch(
      * @mongodb.server.release 6.0.10
      * @since 4.11
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     public static Bson vectorSearch(
             final FieldSearchPath path,
             final Iterable<Double> queryVector,
diff --git a/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java
index 5aa79112871..31165688d4a 100644
--- a/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.AutoEncryptionSettings;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 import org.bson.conversions.Bson;
 
@@ -353,7 +354,7 @@ public CreateCollectionOptions changeStreamPreAndPostImagesOptions(
      * @since 4.7
      * @mongodb.server.release 7.0
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     @Nullable
     public Bson getEncryptedFields() {
         return encryptedFields;
@@ -370,7 +371,7 @@ public Bson getEncryptedFields() {
      * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption
      * @mongodb.server.release 7.0
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     public CreateCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) {
         this.encryptedFields = encryptedFields;
         return this;
diff --git a/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java
index eba101ac000..537efdc1716 100644
--- a/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java
+++ b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.model.vault.DataKeyOptions;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
@@ -28,7 +29,7 @@
  *
  * @since 4.9
  */
-@Beta(Beta.Reason.SERVER)
+@Beta(Reason.SERVER)
 public final class CreateEncryptedCollectionParams {
     private final String kmsProvider;
     @Nullable
diff --git a/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java
index 5c904888c00..cf2dbca66c4 100644
--- a/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.AutoEncryptionSettings;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 import org.bson.conversions.Bson;
 
@@ -39,7 +40,7 @@ public class DropCollectionOptions {
      * @since 4.7
      * @mongodb.server.release 7.0
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     @Nullable
     public Bson getEncryptedFields() {
         return encryptedFields;
@@ -56,7 +57,7 @@ public Bson getEncryptedFields() {
      * @mongodb.server.release 7.0
      * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     public DropCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) {
         this.encryptedFields = encryptedFields;
         return this;
diff --git a/driver-core/src/main/com/mongodb/client/model/Projections.java b/driver-core/src/main/com/mongodb/client/model/Projections.java
index e92a95abf81..98fd2810ed5 100644
--- a/driver-core/src/main/com/mongodb/client/model/Projections.java
+++ b/driver-core/src/main/com/mongodb/client/model/Projections.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.model.search.FieldSearchPath;
 import com.mongodb.client.model.search.SearchCollector;
 import com.mongodb.client.model.search.SearchCount;
@@ -223,7 +224,7 @@ public static Bson metaSearchScore(final String fieldName) {
      * @mongodb.server.release 6.0.10
      * @since 4.11
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     public static Bson metaVectorSearchScore(final String fieldName) {
         return meta(fieldName, "vectorSearchScore");
     }
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/Branches.java b/driver-core/src/main/com/mongodb/client/model/mql/Branches.java
index 1a576cfe581..c6b414de213 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/Branches.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/Branches.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.assertions.Assertions;
 
 import java.util.ArrayList;
@@ -36,7 +37,7 @@
  * @param <T> the type of the values that may be checked.
  * @since 4.9.0
  */
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public final class Branches<T extends MqlValue> {
 
     Branches() {
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java b/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java
index 9b1b88e4467..b068c118ad3 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.assertions.Assertions;
 
 import java.util.ArrayList;
@@ -32,7 +33,7 @@
  * @param <R> the type of the value produced.
  * @since 4.9.0
  */
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public final class BranchesIntermediary<T extends MqlValue, R extends MqlValue> extends BranchesTerminal<T, R> {
     BranchesIntermediary(final List<Function<T, SwitchCase<R>>> branches) {
         super(branches, null);
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java b/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java
index f72cb5cb1f4..299942ebdbf 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 
 import java.util.List;
@@ -30,7 +31,7 @@
  * @param <R> the type of the value produced.
  * @since 4.9.0
  */
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public class BranchesTerminal<T extends MqlValue, R extends MqlValue> {
 
     private final List<Function<T, SwitchCase<R>>> branches;
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java b/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java
index d4176b7205f..893c57c5c86 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.annotations.Beta;
 import com.mongodb.annotations.Immutable;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 import org.bson.codecs.Codec;
 import org.bson.codecs.configuration.CodecProvider;
@@ -35,7 +36,7 @@
  *
  * @since 4.9.0
  */
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 @Immutable
 public final class ExpressionCodecProvider implements CodecProvider {
     @Override
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java
index 047e294c8e9..e979b4687e7 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.util.function.Function;
@@ -33,7 +34,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlArray<T extends MqlValue> extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java
index 5e594a757c7..28290cf25f4 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.util.function.Function;
@@ -28,7 +29,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlBoolean extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java
index 7c39057ee23..b6600aaf689 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.util.function.Function;
@@ -30,7 +31,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlDate extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java
index b99d5b3354b..c60fde8f82a 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.assertions.Assertions;
 import org.bson.conversions.Bson;
@@ -40,7 +41,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlDocument extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java
index bcb1f26e251..dffa35405f1 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -34,7 +35,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlEntry<T extends MqlValue> extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java
index 0fe85fd88d9..46380b57773 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.util.function.Function;
@@ -30,7 +31,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlInteger extends MqlNumber {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java
index 24ee3ef405b..58a279c89c7 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.assertions.Assertions;
 
@@ -35,7 +36,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlMap<T extends MqlValue> extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java
index ec3099047b8..7b6590b7624 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.assertions.Assertions;
 
@@ -31,7 +32,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlNumber extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java
index dd24a8c94a2..e5b6e8fa8bc 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.util.function.Function;
@@ -30,7 +31,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlString extends MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java
index 9366ce77fe9..8cb50885584 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.util.function.Function;
@@ -89,7 +90,7 @@
  * @since 4.9.0
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MqlValue {
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java
index 8d791dc6b3b..a2d58fbc02b 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.mql;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.assertions.Assertions;
 import org.bson.BsonArray;
 import org.bson.BsonBoolean;
@@ -46,7 +47,7 @@
  *
  * @since 4.9.0
  */
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public final class MqlValues {
 
     private MqlValues() {}
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/package-info.java b/driver-core/src/main/com/mongodb/client/model/mql/package-info.java
index 08cbc6195a7..caef0925787 100644
--- a/driver-core/src/main/com/mongodb/client/model/mql/package-info.java
+++ b/driver-core/src/main/com/mongodb/client/model/mql/package-info.java
@@ -19,8 +19,9 @@
  * @see com.mongodb.client.model.mql.MqlValues
  * @since 4.9.0
  */
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 @NonNullApi
 package com.mongodb.client.model.mql;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java
index 11411ca923d..d8a2fe5e908 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface AddSearchScoreExpression extends SearchScoreExpression {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java
index 2a700e6a770..447de8168cd 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -24,7 +25,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface AutocompleteSearchOperator extends SearchOperator {
     @Override
     AutocompleteSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java
index 3d1549fb2fa..b12a86ae78a 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface CompoundSearchOperator extends CompoundSearchOperatorBase, SearchOperator {
     @Override
     CompoundSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java
index f3fe27dbe3d..2834199a4e0 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -26,7 +27,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface CompoundSearchOperatorBase {
     /**
      * Creates a new {@link CompoundSearchOperator} by adding to it {@code clauses} that must all be satisfied.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java
index 31c9cfb4c21..463df7634e3 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface ConstantSearchScore extends SearchScore {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java
index e7ae9be59f2..691ee643572 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface ConstantSearchScoreExpression extends SearchScoreExpression {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java
index 5edb7a02756..8421d058eeb 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.time.Duration;
@@ -27,7 +28,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface DateNearSearchOperator extends SearchOperator {
     @Override
     DateNearSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java
index dfa98485837..f8c654cae1d 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -24,7 +25,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface DateRangeSearchOperator extends DateRangeSearchOperatorBase, SearchOperator {
     @Override
     DateRangeSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java
index b7db8c190e9..df8fbaa93d8 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 import java.time.Instant;
@@ -29,7 +30,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface DateRangeSearchOperatorBase {
     /**
      * Creates a new {@link DateRangeSearchOperator} that tests if values are within (l; ∞).
diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java
index 936ac3040f8..39d8bb2ddf0 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface DateSearchFacet extends SearchFacet {
     /**
      * Creates a new {@link DateSearchFacet} with the default bucket specified.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java
index cb847a49b66..847070dc3bc 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface ExistsSearchOperator extends SearchOperator {
     @Override
     ExistsSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java b/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java
index 72be0245b2c..01190216633 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface FacetSearchCollector extends SearchCollector {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java
index cc4b89f6381..2be4cdecb90 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import org.bson.conversions.Bson;
 
@@ -26,7 +27,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface FieldSearchPath extends SearchPath {
     /**
      * Creates a new {@link FieldSearchPath} with the name of the alternate analyzer specified.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java
index 92b414ebbc8..df23133d1a8 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -27,7 +28,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface FilterCompoundSearchOperator extends CompoundSearchOperator {
     @Override
     FilterCompoundSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java
index 047cf65b2e4..e2bf09bf1a5 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface FunctionSearchScore extends SearchScore {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java
index 7afe5fc1c8a..2acbb244537 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import org.bson.conversions.Bson;
 
@@ -27,7 +28,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface FuzzySearchOptions extends Bson {
     /**
      * Creates a new {@link FuzzySearchOptions} with the maximum
diff --git a/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java
index 038d5973d78..b3ac5fadedb 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface GaussSearchScoreExpression extends SearchScoreExpression {
     /**
      * Creates a new {@link GaussSearchScoreExpression} which does not decay, i.e., its output stays 1, if the value of the
diff --git a/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java
index 5c02fce3030..1501bbd819e 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.geojson.Point;
 
@@ -25,7 +26,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface GeoNearSearchOperator extends SearchOperator {
     @Override
     GeoNearSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java
index f1499a5de16..40ad061cbcb 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface Log1pSearchScoreExpression extends SearchScoreExpression {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java
index 10ad3b9d40d..ae4e5fa8725 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface LogSearchScoreExpression extends SearchScoreExpression {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java
index 888d66d50b0..15576d4a5b6 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface LowerBoundSearchCount extends SearchCount {
     /**
      * Creates a new {@link LowerBoundSearchCount} that instructs to count documents up to the {@code threshold} exactly,
diff --git a/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java
index 31d330ba161..e6ab2332bfe 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MultiplySearchScoreExpression extends SearchScoreExpression {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java
index e9715a9b076..d9db7f7e34b 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -27,7 +28,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MustCompoundSearchOperator extends CompoundSearchOperator {
     @Override
     MustCompoundSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java
index aad0bb633cc..5bdcc56009d 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -27,7 +28,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface MustNotCompoundSearchOperator extends CompoundSearchOperator {
     @Override
     MustNotCompoundSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java
index 1baf5f2303f..65d6ec4969e 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -24,7 +25,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface NumberNearSearchOperator extends SearchOperator {
     @Override
     NumberNearSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java
index e0acad425c6..fe5d37bdc41 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -24,7 +25,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface NumberRangeSearchOperator extends NumberRangeSearchOperatorBase, SearchOperator {
     @Override
     NumberRangeSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java
index 2492f1db11c..daa31d48656 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -27,7 +28,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface NumberRangeSearchOperatorBase {
     /**
      * Creates a new {@link NumberRangeSearchOperator} that tests if values are within (l; ∞).
diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java
index 4fc6bc27d21..4587f688097 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface NumberSearchFacet extends SearchFacet {
     /**
      * Creates a new {@link NumberSearchFacet} with the default bucket specified.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java
index 37c675e523b..40459fa1724 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface PathBoostSearchScore extends SearchScore {
     /**
      * Creates a new {@link PathBoostSearchScore} with the value to fall back to
diff --git a/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java
index a144addae89..b3c14025f4e 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface PathSearchScoreExpression extends SearchScoreExpression {
     /**
      * Creates a new {@link PathSearchScoreExpression} with the value to fall back to
diff --git a/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java
index 89491f5c935..2a36a679ad5 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface RelevanceSearchScoreExpression extends SearchScoreExpression {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java b/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java
index a93c5690699..6f2c45b4961 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.Aggregates;
 import com.mongodb.client.model.Projections;
@@ -34,7 +35,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface SearchCollector extends Bson {
     /**
      * Returns a {@link SearchCollector} that groups results by values or ranges in the specified faceted fields and returns the count
@@ -45,7 +46,7 @@ public interface SearchCollector extends Bson {
      * @return The requested {@link SearchCollector}.
      * @mongodb.atlas.manual atlas-search/facet/ facet collector
      */
-    @Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+    @Beta({Reason.CLIENT, Reason.SERVER})
     static FacetSearchCollector facet(final SearchOperator operator, final Iterable<? extends SearchFacet> facets) {
         notNull("operator", operator);
         notNull("facets", facets);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java
index bb80a894f95..f9a5917582b 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.Projections;
 import org.bson.BsonDocument;
@@ -33,7 +34,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface SearchCount extends Bson {
     /**
      * Returns a {@link SearchCount} that instructs to count documents exactly.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java
index fcc4e2866b8..4aac0fef089 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import org.bson.BsonDocument;
 import org.bson.BsonType;
@@ -43,7 +44,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface SearchFacet extends Bson {
     /**
      * Returns a {@link SearchFacet} that allows narrowing down search results based on the most frequent
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java b/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java
index c337be57e5b..6610c57590f 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.Projections;
 import org.bson.BsonDocument;
@@ -37,7 +38,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface SearchHighlight extends Bson {
     /**
      * Creates a new {@link SearchHighlight} with the maximum number of characters to examine on a document
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java
index e9fd4796234..9234db91c51 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.Aggregates;
 import com.mongodb.client.model.geojson.Point;
@@ -40,7 +41,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface SearchOperator extends Bson {
     /**
      * Creates a new {@link SearchOperator} with the scoring modifier specified.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java
index 8550c672ee5..f5cd0261e8f 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.Aggregates;
 import org.bson.conversions.Bson;
@@ -29,7 +30,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface SearchOptions extends Bson {
     /**
      * Creates a new {@link SearchOptions} with the index name specified.
@@ -53,7 +54,7 @@ public interface SearchOptions extends Bson {
      * @param option The counting option.
      * @return A new {@link SearchOptions}.
      */
-    @Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+    @Beta({Reason.CLIENT, Reason.SERVER})
     SearchOptions count(SearchCount option);
 
     /**
@@ -63,7 +64,7 @@ public interface SearchOptions extends Bson {
      * @return A new {@link SearchOptions}.
      * @mongodb.atlas.manual atlas-search/return-stored-source/ Return stored source fields
      */
-    @Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+    @Beta({Reason.CLIENT, Reason.SERVER})
     SearchOptions returnStoredSource(boolean returnStoredSource);
 
     /**
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java
index c620c2995f0..7213f3f894b 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java
@@ -17,6 +17,7 @@
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.internal.client.model.Util;
 import org.bson.BsonDocument;
 import org.bson.BsonString;
@@ -37,7 +38,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface SearchPath extends Bson {
     /**
      * Returns a {@link SearchPath} for the given {@code path}.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java
index 7c241e8ec06..825264cf7f5 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.Projections;
 import org.bson.BsonDocument;
@@ -34,7 +35,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface SearchScore extends Bson {
     /**
      * Returns a {@link SearchScore} that instructs to multiply the score by the specified {@code value}.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java
index 442b361d813..268786c3344 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import org.bson.BsonDocument;
 import org.bson.BsonDouble;
@@ -36,7 +37,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface SearchScoreExpression extends Bson {
     /**
      * Returns a {@link SearchScoreExpression} that evaluates into the relevance score of a document.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java
index 388a08bcb03..a6bda94e206 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -27,7 +28,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface ShouldCompoundSearchOperator extends CompoundSearchOperator {
     @Override
     ShouldCompoundSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java
index 523d20bfe98..209eaf9ff47 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,7 +24,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface StringSearchFacet extends SearchFacet {
     /**
      * Creates a new {@link StringSearchFacet} that explicitly limits the number of facet categories.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java
index 71d1206d2d7..241639f3a47 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -24,7 +25,7 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface TextSearchOperator extends SearchOperator {
     @Override
     TextSearchOperator score(SearchScore modifier);
diff --git a/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java
index 5df56e6bbbd..2bcbde468f3 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta({Beta.Reason.CLIENT, Beta.Reason.SERVER})
+@Beta({Reason.CLIENT, Reason.SERVER})
 public interface TotalSearchCount extends SearchCount {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java
index 5b180b7c14f..d760bd60d52 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface ValueBoostSearchScore extends SearchScore {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java
index e512ab0a31c..df3607d039b 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 import com.mongodb.client.model.Aggregates;
 import com.mongodb.client.model.Filters;
@@ -30,7 +31,7 @@
  * @since 4.11
  */
 @Sealed
-@Beta(Beta.Reason.SERVER)
+@Beta(Reason.SERVER)
 public interface VectorSearchOptions extends Bson {
     /**
      * Creates a new {@link VectorSearchOptions} with the filter specified.
diff --git a/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java
index 9fb66644fbd..2fceaaaad7a 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java
@@ -16,6 +16,7 @@
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.Sealed;
 
 /**
@@ -23,6 +24,6 @@
  * @since 4.7
  */
 @Sealed
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 public interface WildcardSearchPath extends SearchPath {
 }
diff --git a/driver-core/src/main/com/mongodb/client/model/search/package-info.java b/driver-core/src/main/com/mongodb/client/model/search/package-info.java
index d17cba4139e..c3664cb5560 100644
--- a/driver-core/src/main/com/mongodb/client/model/search/package-info.java
+++ b/driver-core/src/main/com/mongodb/client/model/search/package-info.java
@@ -31,8 +31,9 @@
  * @since 4.7
  */
 @NonNullApi
-@Beta(Beta.Reason.CLIENT)
+@Beta(Reason.CLIENT)
 package com.mongodb.client.model.search;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java
index aef24b54765..509e467273b 100644
--- a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.vault;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinary;
 
@@ -181,7 +182,7 @@ public String getQueryType() {
      * @mongodb.server.release 6.2
      * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) {
         this.rangeOptions = rangeOptions;
         return this;
@@ -195,7 +196,7 @@ public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) {
      * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
      */
     @Nullable
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     public RangeOptions getRangeOptions() {
         return rangeOptions;
     }
diff --git a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java
index b763b0bf112..42a6618bcdb 100644
--- a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java
+++ b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java
@@ -17,6 +17,7 @@
 package com.mongodb.client.model.vault;
 
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 
@@ -33,7 +34,7 @@
  * @mongodb.server.release 6.2
  * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
  */
-@Beta(Beta.Reason.SERVER)
+@Beta(Reason.SERVER)
 public class RangeOptions {
 
     private BsonValue min;
diff --git a/driver-core/src/main/com/mongodb/connection/ServerDescription.java b/driver-core/src/main/com/mongodb/connection/ServerDescription.java
index d97e848c163..f3de13006d1 100644
--- a/driver-core/src/main/com/mongodb/connection/ServerDescription.java
+++ b/driver-core/src/main/com/mongodb/connection/ServerDescription.java
@@ -18,8 +18,10 @@
 
 import com.mongodb.ServerAddress;
 import com.mongodb.TagSet;
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.Immutable;
 import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.annotations.Reason;
 import com.mongodb.internal.connection.DecimalFormatHelper;
 import com.mongodb.internal.connection.Time;
 import com.mongodb.lang.Nullable;
@@ -70,6 +72,10 @@ public class ServerDescription {
     private final ServerAddress address;
 
     private final ServerType type;
+    /**
+     * Identifies whether the server is a mongocryptd.
+     */
+    private final boolean cryptd;
     private final String canonicalAddress;
     private final Set<String> hosts;
     private final Set<String> passives;
@@ -79,6 +85,7 @@ public class ServerDescription {
     private final TagSet tagSet;
     private final String setName;
     private final long roundTripTimeNanos;
+    private final long minRoundTripTimeNanos;
     private final boolean ok;
     private final ServerConnectionState state;
 
@@ -159,6 +166,7 @@ public boolean isHelloOk() {
     public static class Builder {
         private ServerAddress address;
         private ServerType type = UNKNOWN;
+        private boolean cryptd = false;
         private String canonicalAddress;
         private Set<String> hosts = Collections.emptySet();
         private Set<String> passives = Collections.emptySet();
@@ -168,6 +176,7 @@ public static class Builder {
         private TagSet tagSet = new TagSet();
         private String setName;
         private long roundTripTimeNanos;
+        private long minRoundTripTimeNanos;
         private boolean ok;
         private ServerConnectionState state;
         private int minWireVersion = 0;
@@ -188,6 +197,7 @@ public static class Builder {
         Builder(final ServerDescription serverDescription) {
             this.address = serverDescription.address;
             this.type = serverDescription.type;
+            this.cryptd = serverDescription.cryptd;
             this.canonicalAddress = serverDescription.canonicalAddress;
             this.hosts = serverDescription.hosts;
             this.passives = serverDescription.passives;
@@ -245,6 +255,17 @@ public Builder type(final ServerType type) {
             return this;
         }
 
+        /**
+         * Sets whether this server is a <a href="https://www.mongodb.com/docs/manual/core/queryable-encryption/reference/mongocryptd/">mongocryptd</a>.
+         *
+         * @param cryptd true if this server is a mongocryptd.
+         * @return this
+         */
+        public Builder cryptd(final boolean cryptd) {
+            this.cryptd = cryptd;
+            return this;
+        }
+
         /**
          * Sets all members of the replica set that are neither hidden, passive, nor arbiters.
          *
@@ -315,7 +336,7 @@ public Builder tagSet(@Nullable final TagSet tagSet) {
         }
 
         /**
-         * Set the time it took to make the round trip for requesting this information from the server
+         * Set the weighted average time it took to make the round trip for requesting this information from the server
          *
          * @param roundTripTime the time taken
          * @param timeUnit      the units of the time taken
@@ -326,6 +347,21 @@ public Builder roundTripTime(final long roundTripTime, final TimeUnit timeUnit)
             return this;
         }
 
+
+        /**
+         * Set the recent min time it took to make the round trip for requesting this information from the server
+         *
+         * @param minRoundTripTime the minimum time taken
+         * @param timeUnit         the units of the time taken
+         * @return this
+         * @since 5.2
+         */
+        @Alpha(Reason.CLIENT)
+        public Builder minRoundTripTime(final long minRoundTripTime, final TimeUnit timeUnit) {
+            this.minRoundTripTimeNanos = timeUnit.toNanos(minRoundTripTime);
+            return this;
+        }
+
         /**
          * Sets the name of the replica set
          *
@@ -628,6 +664,15 @@ public boolean isSecondary() {
         return ok && (type == REPLICA_SET_SECONDARY || type == SHARD_ROUTER || type == STANDALONE || type == LOAD_BALANCER);
     }
 
+    /**
+     * Returns whether this server is <a href="https://www.mongodb.com/docs/manual/core/queryable-encryption/reference/mongocryptd/">mongocryptd</a>.
+     *
+     * @return true if this server is a mongocryptd.
+     */
+    public boolean isCryptd() {
+        return cryptd;
+    }
+
     /**
      * Get a Set of strings in the format of "[hostname]:[port]" that contains all members of the replica set that are neither hidden,
      * passive, nor arbiters.
@@ -824,7 +869,7 @@ public ClusterType getClusterType() {
     }
 
     /**
-     * Get the time it took to make the round trip for requesting this information from the server in nanoseconds.
+     * Get the weighted average time it took to make the round trip for requesting this information from the server in nanoseconds.
      *
      * @return the time taken to request the information, in nano seconds
      */
@@ -832,6 +877,17 @@ public long getRoundTripTimeNanos() {
         return roundTripTimeNanos;
     }
 
+    /**
+     * Get the recent min time it took to make the round trip for requesting this information from the server in nanoseconds.
+     *
+     * @return the recent min time taken to request the information, in nano seconds
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public long getMinRoundTripTimeNanos() {
+        return minRoundTripTimeNanos;
+    }
+
     /**
      * Gets the exception thrown while attempting to determine the server description.  This is useful for diagnostic purposed when
      * determining the root cause of a connectivity failure.
@@ -843,12 +899,6 @@ public Throwable getException() {
         return exception;
     }
 
-    /**
-     * Returns true if this instance is equals to @code{o}.  Note that equality is defined to NOT include the round trip time.
-     *
-     * @param o the object to compare to
-     * @return true if this instance is equals to @code{o}
-     */
     @Override
     public boolean equals(final Object o) {
         if (this == o) {
@@ -857,7 +907,6 @@ public boolean equals(final Object o) {
         if (o == null || getClass() != o.getClass()) {
             return false;
         }
-
         ServerDescription that = (ServerDescription) o;
 
         if (maxDocumentSize != that.maxDocumentSize) {
@@ -928,6 +977,10 @@ public boolean equals(final Object o) {
             return false;
         }
 
+        if (cryptd != that.cryptd) {
+            return false;
+        }
+
         // Compare class equality and message as exceptions rarely override equals
         Class<?> thisExceptionClass = exception != null ? exception.getClass() : null;
         Class<?> thatExceptionClass = that.exception != null ? that.exception.getClass() : null;
@@ -946,30 +999,9 @@ public boolean equals(final Object o) {
 
     @Override
     public int hashCode() {
-        int result = address.hashCode();
-        result = 31 * result + type.hashCode();
-        result = 31 * result + (canonicalAddress != null ? canonicalAddress.hashCode() : 0);
-        result = 31 * result + hosts.hashCode();
-        result = 31 * result + passives.hashCode();
-        result = 31 * result + arbiters.hashCode();
-        result = 31 * result + (primary != null ? primary.hashCode() : 0);
-        result = 31 * result + maxDocumentSize;
-        result = 31 * result + tagSet.hashCode();
-        result = 31 * result + (setName != null ? setName.hashCode() : 0);
-        result = 31 * result + (electionId != null ? electionId.hashCode() : 0);
-        result = 31 * result + (setVersion != null ? setVersion.hashCode() : 0);
-        result = 31 * result + (topologyVersion != null ? topologyVersion.hashCode() : 0);
-        result = 31 * result + (lastWriteDate != null ? lastWriteDate.hashCode() : 0);
-        result = 31 * result + (int) (lastUpdateTimeNanos ^ (lastUpdateTimeNanos >>> 32));
-        result = 31 * result + (ok ? 1 : 0);
-        result = 31 * result + state.hashCode();
-        result = 31 * result + minWireVersion;
-        result = 31 * result + maxWireVersion;
-        result = 31 * result + (logicalSessionTimeoutMinutes != null ? logicalSessionTimeoutMinutes.hashCode() : 0);
-        result = 31 * result + (helloOk ? 1 : 0);
-        result = 31 * result + (exception == null ? 0 : exception.getClass().hashCode());
-        result = 31 * result + (exception == null ? 0 : exception.getMessage().hashCode());
-        return result;
+        return Objects.hash(address, type, cryptd, canonicalAddress, hosts, passives, arbiters, primary, maxDocumentSize, tagSet, setName,
+                roundTripTimeNanos, minRoundTripTimeNanos, ok, state, minWireVersion, maxWireVersion, electionId, setVersion, topologyVersion, // NOTE(review): confirm equals() compares the RTT fields; if not, drop them here to keep the equals/hashCode contract
+                lastWriteDate, lastUpdateTimeNanos, logicalSessionTimeoutMinutes, exception == null ? null : exception.getClass(), exception == null ? null : exception.getMessage(), helloOk); // hash exception by class+message, matching equals()
     }
 
     @Override
@@ -977,6 +1009,7 @@ public String toString() {
         return "ServerDescription{"
                + "address=" + address
                + ", type=" + type
+               + ", cryptd=" + cryptd
                + ", state=" + state
                + (state == CONNECTED
                   ?
@@ -986,6 +1019,7 @@ public String toString() {
                   + ", maxDocumentSize=" + maxDocumentSize
                   + ", logicalSessionTimeoutMinutes=" + logicalSessionTimeoutMinutes
                   + ", roundTripTimeNanos=" + roundTripTimeNanos
+                  + ", minRoundTripTimeNanos=" + minRoundTripTimeNanos
                   : "")
                + (isReplicaSetMember()
                   ?
@@ -1047,6 +1081,7 @@ private String getRoundTripFormattedInMilliseconds() {
     ServerDescription(final Builder builder) {
         address = notNull("address", builder.address);
         type = notNull("type", builder.type);
+        cryptd = builder.cryptd;
         state = notNull("state", builder.state);
         canonicalAddress = builder.canonicalAddress;
         hosts = builder.hosts;
@@ -1057,6 +1092,7 @@ private String getRoundTripFormattedInMilliseconds() {
         tagSet = builder.tagSet;
         setName = builder.setName;
         roundTripTimeNanos = builder.roundTripTimeNanos;
+        minRoundTripTimeNanos = builder.minRoundTripTimeNanos;
         ok = builder.ok;
         minWireVersion = builder.minWireVersion;
         maxWireVersion = builder.maxWireVersion;
diff --git a/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java b/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java
index 96083f66833..9ccb5ef0c8b 100644
--- a/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java
+++ b/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java
@@ -17,6 +17,8 @@
 package com.mongodb.internal;
 
 import com.mongodb.MongoCommandException;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketException;
 import org.bson.BsonArray;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
@@ -35,6 +37,15 @@
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public final class ExceptionUtils {
+
+    public static boolean isMongoSocketException(final Throwable e) {
+        return e instanceof MongoSocketException;
+    }
+
+    public static boolean isOperationTimeoutFromSocketException(final Throwable e) {
+        return e instanceof MongoOperationTimeoutException && e.getCause() instanceof MongoSocketException;
+    }
+
     public static final class MongoCommandExceptionUtils {
         public static int extractErrorCode(final BsonDocument response) {
             return extractErrorCodeAsBson(response).intValue();
diff --git a/driver-core/src/main/com/mongodb/internal/Locks.java b/driver-core/src/main/com/mongodb/internal/Locks.java
index 984de156f27..8e8260f50d3 100644
--- a/driver-core/src/main/com/mongodb/internal/Locks.java
+++ b/driver-core/src/main/com/mongodb/internal/Locks.java
@@ -17,6 +17,7 @@
 package com.mongodb.internal;
 
 import com.mongodb.MongoInterruptedException;
+import com.mongodb.internal.function.CheckedSupplier;
 
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
diff --git a/driver-core/src/main/com/mongodb/internal/TimeoutContext.java b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java
new file mode 100644
index 00000000000..0b4907c2ff1
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java
@@ -0,0 +1,379 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal;
+
+import com.mongodb.MongoClientException;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.internal.time.StartTime;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
+import com.mongodb.session.ClientSession;
+
+import java.util.Objects;
+import java.util.function.LongConsumer;
+
+import static com.mongodb.assertions.Assertions.assertNull;
+import static com.mongodb.assertions.Assertions.isTrue;
+import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+
+/**
+ * Timeout Context.
+ *
+ * <p>The context for handling timeouts in relation to the Client Side Operation Timeout specification.</p>
+ */
+public class TimeoutContext {
+
+    private final boolean isMaintenanceContext;
+    private final TimeoutSettings timeoutSettings;
+
+    @Nullable
+    private Timeout timeout;
+    @Nullable
+    private Timeout computedServerSelectionTimeout;
+    private long minRoundTripTimeMS = 0;
+
+    @Nullable
+    private MaxTimeSupplier maxTimeSupplier = null;
+
+    public static MongoOperationTimeoutException createMongoRoundTripTimeoutException() {
+        return createMongoTimeoutException("Remaining timeoutMS is less than or equal to the server's minimum round trip time.");
+    }
+
+    public static MongoOperationTimeoutException createMongoTimeoutException(final String message) {
+        return new MongoOperationTimeoutException(message);
+    }
+
+    public static <T> T throwMongoTimeoutException(final String message) {
+        throw new MongoOperationTimeoutException(message);
+    }
+
+    public static MongoOperationTimeoutException createMongoTimeoutException(final Throwable cause) {
+        return createMongoTimeoutException("Operation exceeded the timeout limit: " + cause.getMessage(), cause);
+    }
+
+    public static MongoOperationTimeoutException createMongoTimeoutException(final String message, final Throwable cause) {
+        if (cause instanceof MongoOperationTimeoutException) {
+            return (MongoOperationTimeoutException) cause;
+        }
+        return new MongoOperationTimeoutException(message, cause);
+    }
+
+    public static TimeoutContext createMaintenanceTimeoutContext(final TimeoutSettings timeoutSettings) {
+        return new TimeoutContext(true, timeoutSettings, startTimeout(timeoutSettings.getTimeoutMS()));
+    }
+
+    public static TimeoutContext createTimeoutContext(final ClientSession session, final TimeoutSettings timeoutSettings) {
+        TimeoutContext sessionTimeoutContext = session.getTimeoutContext();
+
+        if (sessionTimeoutContext != null) {
+            TimeoutSettings sessionTimeoutSettings = sessionTimeoutContext.timeoutSettings;
+            if (timeoutSettings.getGenerationId() > sessionTimeoutSettings.getGenerationId()) {
+                throw new MongoClientException("Cannot change the timeoutMS during a transaction.");
+            }
+
+            // Check for any legacy operation timeouts
+            if (sessionTimeoutSettings.getTimeoutMS() == null) {
+                if (timeoutSettings.getMaxTimeMS() != 0) {
+                    sessionTimeoutSettings = sessionTimeoutSettings.withMaxTimeMS(timeoutSettings.getMaxTimeMS());
+                }
+                if (timeoutSettings.getMaxAwaitTimeMS() != 0) {
+                    sessionTimeoutSettings = sessionTimeoutSettings.withMaxAwaitTimeMS(timeoutSettings.getMaxAwaitTimeMS());
+                }
+                if (timeoutSettings.getMaxCommitTimeMS() != null) {
+                    sessionTimeoutSettings = sessionTimeoutSettings.withMaxCommitMS(timeoutSettings.getMaxCommitTimeMS());
+                }
+                return new TimeoutContext(sessionTimeoutSettings);
+            }
+            return sessionTimeoutContext;
+        }
+        return new TimeoutContext(timeoutSettings);
+    }
+
+    // Creates a copy of the timeout context that can be reset without resetting the original.
+    public TimeoutContext copyTimeoutContext() {
+        return new TimeoutContext(getTimeoutSettings(), getTimeout());
+    }
+
+    public TimeoutContext(final TimeoutSettings timeoutSettings) {
+        this(false, timeoutSettings, startTimeout(timeoutSettings.getTimeoutMS()));
+    }
+
+    private TimeoutContext(final TimeoutSettings timeoutSettings, @Nullable final Timeout timeout) {
+        this(false, timeoutSettings, timeout);
+    }
+
+    private TimeoutContext(final boolean isMaintenanceContext, final TimeoutSettings timeoutSettings, @Nullable final Timeout timeout) {
+        this.isMaintenanceContext = isMaintenanceContext;
+        this.timeoutSettings = timeoutSettings;
+        this.timeout = timeout;
+    }
+
+    /**
+     * Allows for the differentiation between users explicitly setting a global operation timeout via {@code timeoutMS}.
+     *
+     * @return true if a timeout has been set.
+     */
+    public boolean hasTimeoutMS() {
+        return timeoutSettings.getTimeoutMS() != null;
+    }
+
+    /**
+     * Runs the runnable if the timeout is expired.
+     * @param onExpired the runnable to run
+     */
+    public void onExpired(final Runnable onExpired) {
+        Timeout.nullAsInfinite(timeout).onExpired(onExpired);
+    }
+
+    /**
+     * Sets the recent min round trip time
+     * @param minRoundTripTimeMS the min round trip time in milliseconds; must be non-negative (0 means no RTT data)
+     * @return this
+     */
+    public TimeoutContext minRoundTripTimeMS(final long minRoundTripTimeMS) {
+        isTrue("'minRoundTripTimeMS' must be a non-negative number", minRoundTripTimeMS >= 0);
+        this.minRoundTripTimeMS = minRoundTripTimeMS;
+        return this;
+    }
+
+    @Nullable
+    public Timeout timeoutIncludingRoundTrip() {
+        return timeout == null ? null : timeout.shortenBy(minRoundTripTimeMS, MILLISECONDS);
+    }
+
+    /**
+     * Returns the remaining {@code timeoutMS} if set or the {@code alternativeTimeoutMS}.
+     *
+     * @param alternativeTimeoutMS the alternative timeout.
+     * @return timeout to use.
+     */
+    public long timeoutOrAlternative(final long alternativeTimeoutMS) {
+        if (timeout == null) {
+            return alternativeTimeoutMS;
+        } else {
+            return timeout.call(MILLISECONDS,
+                    () -> 0L,
+                    (ms) -> ms,
+                    () -> throwMongoTimeoutException("The operation exceeded the timeout limit."));
+        }
+    }
+
+    public TimeoutSettings getTimeoutSettings() {
+        return timeoutSettings;
+    }
+
+    public long getMaxAwaitTimeMS() {
+        return timeoutSettings.getMaxAwaitTimeMS();
+    }
+
+    public void runMaxTimeMS(final LongConsumer onRemaining) {
+        if (maxTimeSupplier != null) {
+            runWithFixedTimeout(maxTimeSupplier.get(), onRemaining);
+            return;
+        }
+        if (timeout == null) {
+            runWithFixedTimeout(timeoutSettings.getMaxTimeMS(), onRemaining);
+            return;
+        }
+        timeout.shortenBy(minRoundTripTimeMS, MILLISECONDS)
+                .run(MILLISECONDS,
+                        () -> {},
+                        onRemaining,
+                        () -> {
+                            throw createMongoRoundTripTimeoutException();
+                        });
+
+    }
+
+    private static void runWithFixedTimeout(final long ms, final LongConsumer onRemaining) {
+        if (ms != 0) {
+            onRemaining.accept(ms);
+        }
+    }
+
+    public void resetToDefaultMaxTime() {
+        this.maxTimeSupplier = null;
+    }
+
+    /**
+     * The override will be provided as the remaining value in
+     * {@link #runMaxTimeMS}, where 0 is ignored.
+     * <p>
+     * NOTE: Suitable for static user-defined values only (i.e MaxAwaitTimeMS),
+     * not for running timeouts that adjust dynamically.
+     */
+    public void setMaxTimeOverride(final long maxTimeMS) {
+        this.maxTimeSupplier = () -> maxTimeMS;
+    }
+
+    /**
+     * The override will be provided as the remaining value in
+     * {@link #runMaxTimeMS}, where 0 is ignored.
+     */
+    public void setMaxTimeOverrideToMaxCommitTime() {
+        this.maxTimeSupplier = this::getMaxCommitTimeMS;
+    }
+
+    @VisibleForTesting(otherwise = PRIVATE)
+    public long getMaxCommitTimeMS() {
+        Long maxCommitTimeMS = timeoutSettings.getMaxCommitTimeMS();
+        return timeoutOrAlternative(maxCommitTimeMS != null ? maxCommitTimeMS : 0);
+    }
+
+    public long getReadTimeoutMS() {
+        return timeoutOrAlternative(timeoutSettings.getReadTimeoutMS());
+    }
+
+    public long getWriteTimeoutMS() {
+        return timeoutOrAlternative(0);
+    }
+
+    public int getConnectTimeoutMs() {
+        final long connectTimeoutMS = getTimeoutSettings().getConnectTimeoutMS();
+        return Math.toIntExact(Timeout.nullAsInfinite(timeout).call(MILLISECONDS,
+                () -> connectTimeoutMS,
+                (ms) -> connectTimeoutMS == 0 ? ms : Math.min(ms, connectTimeoutMS),
+                () -> throwMongoTimeoutException("The operation exceeded the timeout limit.")));
+    }
+
+    public void resetTimeoutIfPresent() {
+        if (hasTimeoutMS()) {
+            timeout = startTimeout(timeoutSettings.getTimeoutMS());
+        }
+    }
+
+    /**
+     * Resets the timeout if this timeout context is being used by pool maintenance
+     */
+    public void resetMaintenanceTimeout() {
+        if (!isMaintenanceContext) {
+            return;
+        }
+        timeout = Timeout.nullAsInfinite(timeout).call(NANOSECONDS,
+                () -> timeout,
+                (ms) -> startTimeout(timeoutSettings.getTimeoutMS()),
+                () -> startTimeout(timeoutSettings.getTimeoutMS()));
+    }
+
+    public TimeoutContext withAdditionalReadTimeout(final int additionalReadTimeout) {
+        // Only used outside timeoutMS usage
+        assertNull(timeout);
+
+        // Check existing read timeout is infinite
+        if (timeoutSettings.getReadTimeoutMS() == 0) {
+            return this;
+        }
+
+        long newReadTimeout = getReadTimeoutMS() + additionalReadTimeout;
+        return new TimeoutContext(timeoutSettings.withReadTimeoutMS(newReadTimeout > 0 ? newReadTimeout : Long.MAX_VALUE));
+    }
+
+    @Override
+    public String toString() {
+        return "TimeoutContext{"
+                + "isMaintenanceContext=" + isMaintenanceContext
+                + ", timeoutSettings=" + timeoutSettings
+                + ", timeout=" + timeout
+                + ", minRoundTripTimeMS=" + minRoundTripTimeMS
+                + '}';
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final TimeoutContext that = (TimeoutContext) o;
+        return isMaintenanceContext == that.isMaintenanceContext
+                && minRoundTripTimeMS == that.minRoundTripTimeMS
+                && Objects.equals(timeoutSettings, that.timeoutSettings)
+                && Objects.equals(timeout, that.timeout);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(isMaintenanceContext, timeoutSettings, timeout, minRoundTripTimeMS);
+    }
+
+    @Nullable
+    public static Timeout startTimeout(@Nullable final Long timeoutMS) {
+        if (timeoutMS != null) {
+            return Timeout.expiresIn(timeoutMS, MILLISECONDS, ZERO_DURATION_MEANS_INFINITE);
+        }
+        return null;
+    }
+
+    /**
+     * Returns the computed server selection timeout
+     *
+     * <p>Caches the computed server selection timeout if:
+     * <ul>
+     *     <li>not in a maintenance context</li>
+     *     <li>there is a timeoutMS, so to keep the same legacy behavior.</li>
+     *     <li>the server selection timeout is less than the remaining overall timeout.</li>
+     * </ul>
+     *
+     * @return the timeout context
+     */
+    public Timeout computeServerSelectionTimeout() {
+        Timeout serverSelectionTimeout = StartTime.now()
+                .timeoutAfterOrInfiniteIfNegative(getTimeoutSettings().getServerSelectionTimeoutMS(), MILLISECONDS);
+
+
+        if (isMaintenanceContext || !hasTimeoutMS()) {
+            return serverSelectionTimeout;
+        }
+
+        if (timeout != null && Timeout.earliest(serverSelectionTimeout, timeout) == timeout) {
+            return timeout;
+        }
+
+        computedServerSelectionTimeout = serverSelectionTimeout;
+        return computedServerSelectionTimeout;
+    }
+
+    /**
+     * Returns the timeout context to use for the handshake process
+     *
+     * @return a new timeout context with the cached computed server selection timeout if available or this
+     */
+    public TimeoutContext withComputedServerSelectionTimeoutContext() {
+        if (this.hasTimeoutMS() && computedServerSelectionTimeout != null) {
+            return new TimeoutContext(false, timeoutSettings, computedServerSelectionTimeout);
+        }
+        return this;
+    }
+
+    public Timeout startWaitQueueTimeout(final StartTime checkoutStart) {
+        final long ms = getTimeoutSettings().getMaxWaitTimeMS();
+        return checkoutStart.timeoutAfterOrInfiniteIfNegative(ms, MILLISECONDS);
+    }
+
+    @Nullable
+    public Timeout getTimeout() {
+        return timeout;
+    }
+
+    public interface MaxTimeSupplier {
+        long get();
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java b/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java
new file mode 100644
index 00000000000..486a893d74c
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.lang.Nullable;
+
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.mongodb.assertions.Assertions.isTrueArgument;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * Timeout Settings.
+ *
+ * <p>Includes all client based timeouts</p>
+ */
+public class TimeoutSettings {
+    private static final AtomicLong NEXT_ID = new AtomicLong(0);
+    private final long generationId;
+    private final long serverSelectionTimeoutMS;
+    private final long connectTimeoutMS;
+    @Nullable
+    private final Long timeoutMS;
+
+    // Deprecated configuration timeout options
+    private final long readTimeoutMS; // aka socketTimeoutMS
+    private final long maxWaitTimeMS; // aka waitQueueTimeoutMS
+    @Nullable
+    private final Long wTimeoutMS;
+
+    // Deprecated options for CRUD methods
+    private final long maxTimeMS;
+    private final long maxAwaitTimeMS;
+    @Nullable
+    private final Long maxCommitTimeMS;
+
+    public static final TimeoutSettings DEFAULT = create(MongoClientSettings.builder().build());
+
+    @Nullable
+    public static Long convertAndValidateTimeoutNullable(@Nullable final Long timeout, final TimeUnit timeUnit) {
+        return timeout == null ? null : convertAndValidateTimeout(timeout, timeUnit, "timeout");
+    }
+
+    public static long convertAndValidateTimeout(final long timeout, final TimeUnit timeUnit) {
+        return convertAndValidateTimeout(timeout, timeUnit, "timeout");
+    }
+
+    public static long convertAndValidateTimeout(final long timeout, final TimeUnit timeUnit, final String fieldName) {
+        return isTrueArgument(fieldName + " was too small. After conversion it was rounded to 0 milliseconds, "
+                        + " which would result in an unintended infinite timeout.",
+                () -> MILLISECONDS.convert(timeout, timeUnit),
+                (timeoutMS) ->  timeout == 0 && timeoutMS == 0 || timeoutMS > 0);
+    }
+
+    @SuppressWarnings("deprecation")
+    public static TimeoutSettings create(final MongoClientSettings settings) {
+        return new TimeoutSettings(
+                settings.getClusterSettings().getServerSelectionTimeout(TimeUnit.MILLISECONDS),
+                settings.getSocketSettings().getConnectTimeout(TimeUnit.MILLISECONDS),
+                settings.getSocketSettings().getReadTimeout(TimeUnit.MILLISECONDS),
+                settings.getTimeout(TimeUnit.MILLISECONDS),
+                settings.getConnectionPoolSettings().getMaxWaitTime(TimeUnit.MILLISECONDS));
+    }
+
+    public static TimeoutSettings createHeartbeatSettings(final MongoClientSettings settings) {
+        return new TimeoutSettings(
+                settings.getClusterSettings().getServerSelectionTimeout(TimeUnit.MILLISECONDS),
+                settings.getHeartbeatSocketSettings().getConnectTimeout(TimeUnit.MILLISECONDS),
+                settings.getHeartbeatSocketSettings().getReadTimeout(TimeUnit.MILLISECONDS),
+                settings.getTimeout(TimeUnit.MILLISECONDS),
+                settings.getConnectionPoolSettings().getMaxWaitTime(TimeUnit.MILLISECONDS));
+    }
+
+    public TimeoutSettings(final long serverSelectionTimeoutMS, final long connectTimeoutMS, final long readTimeoutMS,
+            @Nullable final Long timeoutMS, final long maxWaitTimeMS) {
+        this(-1, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, 0, 0, null, null, maxWaitTimeMS);
+    }
+
+    TimeoutSettings(@Nullable final Long timeoutMS, final long serverSelectionTimeoutMS, final long connectTimeoutMS,
+            final long readTimeoutMS, final long maxAwaitTimeMS, final long maxTimeMS, @Nullable final Long maxCommitTimeMS,
+            @Nullable final Long wTimeoutMS, final long maxWaitTimeMS) {
+        this(timeoutMS != null ? NEXT_ID.incrementAndGet() : -1, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS,
+                maxAwaitTimeMS, maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    private TimeoutSettings(final long generationId, @Nullable final Long timeoutMS, final long serverSelectionTimeoutMS,
+            final long connectTimeoutMS, final long readTimeoutMS, final long maxAwaitTimeMS, final long maxTimeMS,
+            @Nullable final Long maxCommitTimeMS, @Nullable final Long wTimeoutMS, final long maxWaitTimeMS) {
+
+        isTrueArgument("timeoutMS must be >= 0", timeoutMS == null || timeoutMS >= 0);
+        isTrueArgument("maxAwaitTimeMS must be >= 0", maxAwaitTimeMS >= 0);
+        isTrueArgument("maxTimeMS must be >= 0", maxTimeMS >= 0);
+        isTrueArgument("timeoutMS must be greater than maxAwaitTimeMS", timeoutMS == null || timeoutMS == 0
+                || timeoutMS > maxAwaitTimeMS);
+        isTrueArgument("maxCommitTimeMS must be >= 0", maxCommitTimeMS == null || maxCommitTimeMS >= 0);
+
+        this.generationId = generationId;
+        this.serverSelectionTimeoutMS = serverSelectionTimeoutMS;
+        this.connectTimeoutMS = connectTimeoutMS;
+        this.timeoutMS = timeoutMS;
+        this.maxAwaitTimeMS = maxAwaitTimeMS;
+        this.readTimeoutMS = readTimeoutMS;
+        this.maxTimeMS = maxTimeMS;
+        this.maxCommitTimeMS = maxCommitTimeMS;
+        this.wTimeoutMS = wTimeoutMS;
+        this.maxWaitTimeMS = maxWaitTimeMS;
+    }
+
+    public TimeoutSettings connectionOnly() {
+        return new TimeoutSettings(serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, null, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withTimeout(@Nullable final Long timeout, final TimeUnit timeUnit) {
+        return withTimeoutMS(convertAndValidateTimeoutNullable(timeout, timeUnit));
+    }
+
+    TimeoutSettings withTimeoutMS(@Nullable final Long timeoutMS) {
+        return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withMaxTimeMS(final long maxTimeMS) {
+        return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withMaxAwaitTimeMS(final long maxAwaitTimeMS) {
+        return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withMaxTimeAndMaxAwaitTimeMS(final long maxTimeMS, final long maxAwaitTimeMS) {
+        return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withMaxCommitMS(@Nullable final Long maxCommitTimeMS) {
+        return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withWTimeoutMS(@Nullable final Long wTimeoutMS) {
+        return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withReadTimeoutMS(final long readTimeoutMS) {
+        return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withServerSelectionTimeoutMS(final long serverSelectionTimeoutMS) {
+        return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public TimeoutSettings withMaxWaitTimeMS(final long maxWaitTimeMS) {
+        return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS,
+                maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS);
+    }
+
+    public long getServerSelectionTimeoutMS() {
+        return serverSelectionTimeoutMS;
+    }
+
+    public long getConnectTimeoutMS() {
+        return connectTimeoutMS;
+    }
+
+    @Nullable
+    public Long getTimeoutMS() {
+        return timeoutMS;
+    }
+
+    public long getMaxAwaitTimeMS() {
+        return maxAwaitTimeMS;
+    }
+
+    public long getReadTimeoutMS() {
+        return readTimeoutMS;
+    }
+
+    public long getMaxTimeMS() {
+        return maxTimeMS;
+    }
+
+    @Nullable
+    public Long getWTimeoutMS() {
+        return wTimeoutMS;
+    }
+
+    public long getMaxWaitTimeMS() {
+        return maxWaitTimeMS;
+    }
+
+    @Nullable
+    public Long getMaxCommitTimeMS() {
+        return maxCommitTimeMS;
+    }
+
+    /**
+     * The generation id represents a creation counter for {@code TimeoutSettings} that contain a {@code timeoutMS} value.
+     *
+     * <p>This is used to determine if a new set of {@code TimeoutSettings} has been created within a {@code withTransaction}
+     * block, so that a client side error can be issued.</p>
+     *
+     * @return the generation id or -1 if no timeout MS is set.
+     */
+    public long getGenerationId() {
+        return generationId;
+    }
+
+    @Override
+    public String toString() {
+        return "TimeoutSettings{"
+                + "generationId=" + generationId
+                + ", timeoutMS=" + timeoutMS
+                + ", serverSelectionTimeoutMS=" + serverSelectionTimeoutMS
+                + ", connectTimeoutMS=" + connectTimeoutMS
+                + ", readTimeoutMS=" + readTimeoutMS
+                + ", maxWaitTimeMS=" + maxWaitTimeMS
+                + ", wTimeoutMS=" + wTimeoutMS
+                + ", maxTimeMS=" + maxTimeMS
+                + ", maxAwaitTimeMS=" + maxAwaitTimeMS
+                + ", maxCommitTimeMS=" + maxCommitTimeMS
+                + '}';
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final TimeoutSettings that = (TimeoutSettings) o;
+        return serverSelectionTimeoutMS == that.serverSelectionTimeoutMS && connectTimeoutMS == that.connectTimeoutMS
+                && readTimeoutMS == that.readTimeoutMS && maxWaitTimeMS == that.maxWaitTimeMS && maxTimeMS == that.maxTimeMS
+                && maxAwaitTimeMS == that.maxAwaitTimeMS && Objects.equals(timeoutMS, that.timeoutMS)
+                && Objects.equals(wTimeoutMS, that.wTimeoutMS) && Objects.equals(maxCommitTimeMS, that.maxCommitTimeMS);
+    }
+
+    @Override
+    public int hashCode() { // generationId is excluded: equals() ignores it, so including it would break the equals/hashCode contract
+        return Objects.hash(serverSelectionTimeoutMS, connectTimeoutMS, timeoutMS, readTimeoutMS, maxWaitTimeMS,
+                wTimeoutMS, maxTimeMS, maxAwaitTimeMS, maxCommitTimeMS);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java
index 33e1af001bb..a81b2fdd12c 100644
--- a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java
+++ b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java
@@ -16,6 +16,7 @@
 
 package com.mongodb.internal.async;
 
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.function.RetryState;
 import com.mongodb.internal.async.function.RetryingAsyncCallbackSupplier;
 
@@ -267,10 +268,10 @@ default <R> AsyncSupplier<R> thenSupply(final AsyncSupplier<R> supplier) {
      * @see RetryingAsyncCallbackSupplier
      */
     default AsyncRunnable thenRunRetryingWhile(
-            final AsyncRunnable runnable, final Predicate<Throwable> shouldRetry) {
+            final TimeoutContext timeoutContext, final AsyncRunnable runnable, final Predicate<Throwable> shouldRetry) {
         return thenRun(callback -> {
             new RetryingAsyncCallbackSupplier<Void>(
-                    new RetryState(),
+                    new RetryState(timeoutContext),
                     (rs, lastAttemptFailure) -> shouldRetry.test(lastAttemptFailure),
                     // `finish` is required here instead of `unsafeFinish`
                     // because only `finish` meets the contract of
diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java
index 89329f16a24..e1cecf721fc 100644
--- a/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java
+++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java
@@ -15,7 +15,9 @@
  */
 package com.mongodb.internal.async.function;
 
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.async.function.LoopState.AttachmentKey;
 import com.mongodb.lang.NonNull;
@@ -29,6 +31,7 @@
 import static com.mongodb.assertions.Assertions.assertFalse;
 import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.assertions.Assertions.assertTrue;
+import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException;
 
 /**
  * Represents both the state associated with a retryable activity and a handle that can be used to affect retrying, e.g.,
@@ -48,25 +51,62 @@ public final class RetryState {
 
     private final LoopState loopState;
     private final int attempts;
+    private final boolean retryUntilTimeoutThrowsException;
     @Nullable
-    private Throwable exception;
+    private Throwable previouslyChosenException;
 
     /**
-     * @param retries A non-negative number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited.
+     * Creates a {@code RetryState} with a positive number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as
+     * being unlimited.
+     * <p>
+     * If no timeout is specified via {@link TimeoutContext#hasTimeoutMS()}, the specified {@code retries} parameter acts as a
+     * fallback bound. Otherwise, retries are unbounded until the timeout is reached.
+     * <p>
+     * It is possible to provide an additional {@code retryPredicate} in the {@link #doAdvanceOrThrow} method,
+     * which can be used to stop retrying based on a custom condition in addition to {@code retries} and {@link TimeoutContext}.
+     * </p>
+     *
+     * @param retries A positive number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited.
+     * @param timeoutContext A timeout context that will be used to determine if the operation has timed out.
      * @see #attempts()
      */
-    public RetryState(final int retries) {
-        assertTrue(retries >= 0);
-        loopState = new LoopState();
-        attempts = retries == INFINITE_ATTEMPTS ? INFINITE_ATTEMPTS : retries + 1;
+    public static RetryState withRetryableState(final int retries, final TimeoutContext timeoutContext) {
+        assertTrue(retries > 0);
+        if (timeoutContext.hasTimeoutMS()) {
+            return new RetryState(INFINITE_ATTEMPTS, timeoutContext);
+        }
+        return new RetryState(retries, null);
+    }
+
+    public static RetryState withNonRetryableState() {
+        return new RetryState(0, null);
     }
 
     /**
      * Creates a {@link RetryState} that does not limit the number of retries.
+     * The number of attempts is limited iff {@link TimeoutContext#hasTimeoutMS()} is true and timeout has expired.
+     * <p>
+     * It is possible to provide an additional {@code retryPredicate} in the {@link #doAdvanceOrThrow} method,
+     * which can be used to stop retrying based on a custom condition in addition to {@code retries} and {@link TimeoutContext}.
+     * </p>
+     *
+     * @param timeoutContext A timeout context that will be used to determine if the operation has timed out.
+     * @see #attempts()
+     */
+    public RetryState(final TimeoutContext timeoutContext) {
+        this(INFINITE_ATTEMPTS, timeoutContext);
+    }
+
+    /**
+     * @param retries A non-negative number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited.
+     * @param timeoutContext A timeout context that will be used to determine if the operation has timed out.
      * @see #attempts()
      */
-    public RetryState() {
-        this(INFINITE_ATTEMPTS);
+    private RetryState(final int retries, @Nullable final TimeoutContext timeoutContext) {
+        assertTrue(retries >= 0);
+        loopState = new LoopState();
+        attempts = retries == INFINITE_ATTEMPTS ? INFINITE_ATTEMPTS : retries + 1;
+        this.retryUntilTimeoutThrowsException = timeoutContext != null && timeoutContext.hasTimeoutMS();
     }
 
     /**
@@ -136,7 +176,7 @@ void advanceOrThrow(final Throwable attemptException, final BinaryOperator<Throw
     }
 
     /**
-     * @param onlyRuntimeExceptions {@code true} iff the method must expect {@link #exception} and {@code attemptException} to be
+     * @param onlyRuntimeExceptions {@code true} iff the method must expect {@link #previouslyChosenException} and {@code attemptException} to be
      * {@link RuntimeException}s and must not explicitly handle other {@link Throwable} types, of which only {@link Error} is possible
      * as {@link RetryState} does not have any source of {@link Exception}s.
      * @param onAttemptFailureOperator See {@link #advanceOrThrow(RuntimeException, BinaryOperator, BiPredicate)}.
@@ -150,19 +190,36 @@ private void doAdvanceOrThrow(final Throwable attemptException,
         if (onlyRuntimeExceptions) {
             assertTrue(isRuntime(attemptException));
         }
-        assertTrue(!isFirstAttempt() || exception == null);
-        Throwable newlyChosenException = callOnAttemptFailureOperator(exception, attemptException, onlyRuntimeExceptions, onAttemptFailureOperator);
-        if (isLastAttempt()) {
-            exception = newlyChosenException;
-            throw exception;
+        assertTrue(!isFirstAttempt() || previouslyChosenException == null);
+        Throwable newlyChosenException = callOnAttemptFailureOperator(previouslyChosenException, attemptException, onlyRuntimeExceptions, onAttemptFailureOperator);
+
+        /*
+         * A MongoOperationTimeoutException indicates that the operation timed out, either during command execution or server selection.
+         * The timeout for server selection is determined by the computedServerSelectionMS = min(serverSelectionTimeoutMS, timeoutMS).
+         *
+         * It is important to check if the exception is an instance of MongoOperationTimeoutException to detect a timeout.
+         */
+        if (isLastAttempt() || attemptException instanceof MongoOperationTimeoutException) {
+            previouslyChosenException = newlyChosenException;
+            /*
+             * The function of isLastIteration() is to indicate if retrying has
+             * been explicitly halted. Such a stop is not interpreted as
+             * a timeout exception but as a deliberate cessation of retry attempts.
+             */
+            if (retryUntilTimeoutThrowsException && !loopState.isLastIteration()) {
+                previouslyChosenException = createMongoTimeoutException(
+                        "Retry attempt exceeded the timeout limit.",
+                        previouslyChosenException);
+            }
+            throw previouslyChosenException;
         } else {
-            // note that we must not update the state, e.g, `exception`, `loopState`, before calling `retryPredicate`
+            // note that we must not update the state, e.g., `previouslyChosenException`, `loopState`, before calling `retryPredicate`
             boolean retry = shouldRetry(this, attemptException, newlyChosenException, onlyRuntimeExceptions, retryPredicate);
-            exception = newlyChosenException;
+            previouslyChosenException = newlyChosenException;
             if (retry) {
                 assertTrue(loopState.advance());
             } else {
-                throw exception;
+                throw previouslyChosenException;
             }
         }
     }
@@ -249,9 +306,9 @@ private static boolean isRuntime(@Nullable final Throwable exception) {
     public void breakAndThrowIfRetryAnd(final Supplier<Boolean> predicate) throws RuntimeException {
         assertFalse(loopState.isLastIteration());
         if (!isFirstAttempt()) {
-            assertNotNull(exception);
-            assertTrue(exception instanceof RuntimeException);
-            RuntimeException localException = (RuntimeException) exception;
+            assertNotNull(previouslyChosenException);
+            assertTrue(previouslyChosenException instanceof RuntimeException);
+            RuntimeException localException = (RuntimeException) previouslyChosenException;
             try {
                 if (predicate.get()) {
                     loopState.markAsLastIteration();
@@ -310,14 +367,23 @@ public boolean isFirstAttempt() {
 
     /**
      * Returns {@code true} iff the current attempt is known to be the last one, i.e., it is known that no more retries will be made.
-     * An attempt is known to be the last one either because the number of {@linkplain #attempts() attempts} is limited and the current
-     * attempt is the last one, or because {@link #breakAndThrowIfRetryAnd(Supplier)} /
-     * {@link #breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback)} / {@link #markAsLastAttempt()} was called.
+     * An attempt is known to be the last one iff any of the following applies:
+     * <ul>
+     *   <li>{@link #breakAndThrowIfRetryAnd(Supplier)} / {@link #breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback)} / {@link #markAsLastAttempt()} was called.</li>
+     *   <li>A timeout is set and has been reached.</li>
+     *   <li>No timeout is set, and the number of {@linkplain #attempts() attempts} is limited, and the current attempt is the last one.</li>
+     * </ul>
      *
      * @see #attempts()
      */
     public boolean isLastAttempt() {
-        return attempt() == attempts - 1 || loopState.isLastIteration();
+        if (loopState.isLastIteration()) {
+            return true;
+        }
+        if (retryUntilTimeoutThrowsException) {
+            return false;
+        }
+        return attempt() == attempts - 1;
     }
 
     /**
@@ -332,9 +398,9 @@ public int attempt() {
     /**
      * Returns a positive maximum number of attempts:
      * <ul>
-     *     <li>0 if the number of retries is {@linkplain #RetryState() unlimited};</li>
+     *     <li>0 if the number of retries is {@linkplain #RetryState(TimeoutContext) unlimited};</li>
      *     <li>1 if no retries are allowed;</li>
-     *     <li>{@link #RetryState(int) retries} + 1 otherwise.</li>
+     *     <li>{@link #RetryState(int, TimeoutContext) retries} + 1 otherwise.</li>
      * </ul>
      *
      * @see #attempt()
@@ -353,8 +419,8 @@ public int attempts() {
      * In synchronous code the returned exception is of the type {@link RuntimeException}.
      */
     public Optional<Throwable> exception() {
-        assertTrue(exception == null || !isFirstAttempt());
-        return Optional.ofNullable(exception);
+        assertTrue(previouslyChosenException == null || !isFirstAttempt());
+        return Optional.ofNullable(previouslyChosenException);
     }
 
     /**
@@ -377,7 +443,7 @@ public String toString() {
         return "RetryState{"
                 + "loopState=" + loopState
                 + ", attempts=" + (attempts == INFINITE_ATTEMPTS ? "infinite" : attempts)
-                + ", exception=" + exception
+                + ", exception=" + previouslyChosenException
                 + '}';
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/async/package-info.java b/driver-core/src/main/com/mongodb/internal/async/package-info.java
index f6f0693821d..39b952eead1 100644
--- a/driver-core/src/main/com/mongodb/internal/async/package-info.java
+++ b/driver-core/src/main/com/mongodb/internal/async/package-info.java
@@ -15,7 +15,6 @@
  */
 
 /**
- * This package contains cluster and connection event related classes
  */
 
 @NonNullApi
diff --git a/driver-core/src/main/com/mongodb/internal/authentication/package-info.java b/driver-core/src/main/com/mongodb/internal/authentication/package-info.java
index 7a697f21ace..bbeb09628af 100644
--- a/driver-core/src/main/com/mongodb/internal/authentication/package-info.java
+++ b/driver-core/src/main/com/mongodb/internal/authentication/package-info.java
@@ -15,7 +15,6 @@
  */
 
 /**
- * This package contains cluster and connection event related classes
  */
 
 @NonNullApi
diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java
index acf75a3b1e8..fd46261a6df 100644
--- a/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java
+++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java
@@ -18,26 +18,22 @@
 
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
 import com.mongodb.ServerAddress;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ClusterConnectionMode;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext;
 import com.mongodb.internal.connection.Server;
 import com.mongodb.internal.selector.ReadPreferenceServerSelector;
 import com.mongodb.internal.selector.ReadPreferenceWithFallbackServerSelector;
 import com.mongodb.internal.selector.ServerAddressSelector;
 import com.mongodb.internal.selector.WritableServerSelector;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 import com.mongodb.selector.ServerSelector;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 /**
  * A simple ReadWriteBinding implementation that supplies write connection sources bound to a possibly different primary each time, and a
@@ -49,9 +45,6 @@ public class AsyncClusterBinding extends AbstractReferenceCounted implements Asy
     private final Cluster cluster;
     private final ReadPreference readPreference;
     private final ReadConcern readConcern;
-    @Nullable
-    private final ServerApi serverApi;
-    private final RequestContext requestContext;
     private final OperationContext operationContext;
 
     /**
@@ -60,18 +53,15 @@ public class AsyncClusterBinding extends AbstractReferenceCounted implements Asy
      * @param cluster        a non-null Cluster which will be used to select a server to bind to
      * @param readPreference a non-null ReadPreference for read operations
      * @param readConcern    a non-null read concern
-     * @param serverApi      a server API, which may be null
-     * @param requestContext the request context
+     * @param operationContext the operation context
      * <p>This class is not part of the public API and may be removed or changed at any time</p>
      */
     public AsyncClusterBinding(final Cluster cluster, final ReadPreference readPreference, final ReadConcern readConcern,
-            @Nullable final ServerApi serverApi, final RequestContext requestContext) {
+            final OperationContext operationContext) {
         this.cluster = notNull("cluster", cluster);
         this.readPreference = notNull("readPreference", readPreference);
-        this.readConcern = (notNull("readConcern", readConcern));
-        this.serverApi = serverApi;
-        this.requestContext = notNull("requestContext", requestContext);
-        operationContext = new OperationContext();
+        this.readConcern = notNull("readConcern", readConcern);
+        this.operationContext = notNull("operationContext", operationContext);
     }
 
     @Override
@@ -85,22 +75,6 @@ public ReadPreference getReadPreference() {
         return readPreference;
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return new ReadConcernAwareNoOpSessionContext(readConcern);
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return serverApi;
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return requestContext;
-    }
-
     @Override
     public OperationContext getOperationContext() {
         return operationContext;
@@ -163,6 +137,7 @@ private AsyncClusterBindingConnectionSource(final Server server, final ServerDes
             this.server = server;
             this.serverDescription = serverDescription;
             this.appliedReadPreference = appliedReadPreference;
+            operationContext.getTimeoutContext().minRoundTripTimeMS(NANOSECONDS.toMillis(serverDescription.getMinRoundTripTimeNanos()));
             AsyncClusterBinding.this.retain();
         }
 
@@ -171,22 +146,6 @@ public ServerDescription getServerDescription() {
             return serverDescription;
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return new ReadConcernAwareNoOpSessionContext(readConcern);
-        }
-
-        @Override
-        @Nullable
-        public ServerApi getServerApi() {
-            return serverApi;
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return requestContext;
-        }
-
         @Override
         public OperationContext getOperationContext() {
             return operationContext;
diff --git a/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java b/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java
index c98e88232ba..c10f0fb16ac 100644
--- a/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java
+++ b/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java
@@ -16,23 +16,18 @@
 
 package com.mongodb.internal.binding;
 
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
 import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public interface BindingContext {
-    SessionContext getSessionContext();
-
-    @Nullable
-    ServerApi getServerApi();
-
-    RequestContext getRequestContext();
 
+    /**
+     * Note: Will return the same operation context if called multiple times.
+     *
+     * @return the operation context for the binding context.
+     */
     OperationContext getOperationContext();
 }
diff --git a/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java b/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java
index a2223d02014..cd3f8473bbb 100644
--- a/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java
+++ b/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java
@@ -18,25 +18,21 @@
 
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
 import com.mongodb.ServerAddress;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ClusterConnectionMode;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.Connection;
 import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext;
 import com.mongodb.internal.connection.Server;
 import com.mongodb.internal.connection.ServerTuple;
 import com.mongodb.internal.selector.ReadPreferenceServerSelector;
 import com.mongodb.internal.selector.ReadPreferenceWithFallbackServerSelector;
 import com.mongodb.internal.selector.ServerAddressSelector;
 import com.mongodb.internal.selector.WritableServerSelector;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 /**
  * A simple ReadWriteBinding implementation that supplies write connection sources bound to a possibly different primary each time, and a
@@ -48,27 +44,21 @@ public class ClusterBinding extends AbstractReferenceCounted implements ClusterA
     private final Cluster cluster;
     private final ReadPreference readPreference;
     private final ReadConcern readConcern;
-    @Nullable
-    private final ServerApi serverApi;
-    private final RequestContext requestContext;
     private final OperationContext operationContext;
 
     /**
      * Creates an instance.
-     * @param cluster        a non-null Cluster which will be used to select a server to bind to
-     * @param readPreference a non-null ReadPreference for read operations
-     * @param readConcern    a non-null read concern
-     * @param serverApi      a server API, which may be null
-     * @param requestContext the request context
+     * @param cluster          a non-null Cluster which will be used to select a server to bind to
+     * @param readPreference   a non-null ReadPreference for read operations
+     * @param readConcern      a non-null read concern
+     * @param operationContext the operation context
      */
     public ClusterBinding(final Cluster cluster, final ReadPreference readPreference, final ReadConcern readConcern,
-                          @Nullable final ServerApi serverApi, final RequestContext requestContext) {
+                          final OperationContext operationContext) {
         this.cluster = notNull("cluster", cluster);
         this.readPreference = notNull("readPreference", readPreference);
         this.readConcern = notNull("readConcern", readConcern);
-        this.serverApi = serverApi;
-        this.requestContext = notNull("requestContext", requestContext);
-        operationContext = new OperationContext();
+        this.operationContext = notNull("operationContext", operationContext);
     }
 
     @Override
@@ -82,22 +72,6 @@ public ReadPreference getReadPreference() {
         return readPreference;
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return new ReadConcernAwareNoOpSessionContext(readConcern);
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return serverApi;
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return requestContext;
-    }
-
     @Override
     public OperationContext getOperationContext() {
         return operationContext;
@@ -140,6 +114,7 @@ private ClusterBindingConnectionSource(final ServerTuple serverTuple, final Read
             this.server = serverTuple.getServer();
             this.serverDescription = serverTuple.getServerDescription();
             this.appliedReadPreference = appliedReadPreference;
+            operationContext.getTimeoutContext().minRoundTripTimeMS(NANOSECONDS.toMillis(serverDescription.getMinRoundTripTimeNanos()));
             ClusterBinding.this.retain();
         }
 
@@ -148,26 +123,11 @@ public ServerDescription getServerDescription() {
             return serverDescription;
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return new ReadConcernAwareNoOpSessionContext(readConcern);
-        }
-
         @Override
         public OperationContext getOperationContext() {
             return operationContext;
         }
 
-        @Override
-        public ServerApi getServerApi() {
-            return serverApi;
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return requestContext;
-        }
-
         @Override
         public ReadPreference getReadPreference() {
             return appliedReadPreference;
diff --git a/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java b/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java
index 47bb2be22fb..7d7e948c344 100644
--- a/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java
+++ b/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java
@@ -17,18 +17,13 @@
 package com.mongodb.internal.binding;
 
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
 import com.mongodb.ServerAddress;
-import com.mongodb.ServerApi;
-import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.Connection;
-import com.mongodb.internal.connection.NoOpSessionContext;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.ServerTuple;
 import com.mongodb.internal.selector.ServerAddressSelector;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 import static com.mongodb.assertions.Assertions.notNull;
 
@@ -40,25 +35,18 @@
 public class SingleServerBinding extends AbstractReferenceCounted implements ReadWriteBinding {
     private final Cluster cluster;
     private final ServerAddress serverAddress;
-    @Nullable
-    private final ServerApi serverApi;
-    private final RequestContext requestContext;
     private final OperationContext operationContext;
 
     /**
      * Creates an instance, defaulting to {@link com.mongodb.ReadPreference#primary()} for reads.
      * @param cluster       a non-null  Cluster which will be used to select a server to bind to
      * @param serverAddress a non-null  address of the server to bind to
-     * @param serverApi     the server API, which may be null
-     * @param requestContext the request context, which may not be null
+     * @param operationContext the operation context
      */
-    public SingleServerBinding(final Cluster cluster, final ServerAddress serverAddress, @Nullable final ServerApi serverApi,
-            final RequestContext requestContext) {
+    public SingleServerBinding(final Cluster cluster, final ServerAddress serverAddress, final OperationContext operationContext) {
         this.cluster = notNull("cluster", cluster);
         this.serverAddress = notNull("serverAddress", serverAddress);
-        this.serverApi = serverApi;
-        this.requestContext = notNull("requestContext", requestContext);
-        operationContext = new OperationContext();
+        this.operationContext = notNull("operationContext", operationContext);
     }
 
     @Override
@@ -81,22 +69,6 @@ public ConnectionSource getReadConnectionSource(final int minWireVersion, final
         throw new UnsupportedOperationException();
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return NoOpSessionContext.INSTANCE;
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return serverApi;
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return requestContext;
-    }
-
     @Override
     public OperationContext getOperationContext() {
         return operationContext;
@@ -122,26 +94,11 @@ public ServerDescription getServerDescription() {
             return serverDescription;
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return NoOpSessionContext.INSTANCE;
-        }
-
         @Override
         public OperationContext getOperationContext() {
             return operationContext;
         }
 
-        @Override
-        public ServerApi getServerApi() {
-            return serverApi;
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return requestContext;
-        }
-
         @Override
         public ReadPreference getReadPreference() {
             return ReadPreference.primary();
@@ -149,8 +106,10 @@ public ReadPreference getReadPreference() {
 
         @Override
         public Connection getConnection() {
-            return cluster.selectServer(new ServerAddressSelector(serverAddress), operationContext)
-                    .getServer().getConnection(operationContext);
+            return cluster
+                    .selectServer(new ServerAddressSelector(serverAddress), operationContext)
+                    .getServer()
+                    .getConnection(operationContext);
         }
 
         @Override
diff --git a/driver-core/src/main/com/mongodb/internal/binding/StaticBindingContext.java b/driver-core/src/main/com/mongodb/internal/binding/StaticBindingContext.java
deleted file mode 100644
index e0e7f40ade0..00000000000
--- a/driver-core/src/main/com/mongodb/internal/binding/StaticBindingContext.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2008-present MongoDB, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb.internal.binding;
-
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
-import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
-
-/**
- *
- * <p>This class is not part of the public API and may be removed or changed at any time</p>
- */
-public class StaticBindingContext implements BindingContext {
-    private final SessionContext sessionContext;
-    private final ServerApi serverApi;
-    private final RequestContext requestContext;
-    private final OperationContext operationContext;
-
-    public StaticBindingContext(final SessionContext sessionContext, @Nullable final ServerApi serverApi,
-            final RequestContext requestContext, final OperationContext operationContext) {
-        this.sessionContext = sessionContext;
-        this.serverApi = serverApi;
-        this.requestContext = requestContext;
-        this.operationContext = operationContext;
-    }
-
-    @Override
-    public SessionContext getSessionContext() {
-        return sessionContext;
-    }
-
-    @Nullable
-    @Override
-    public ServerApi getServerApi() {
-        return serverApi;
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return requestContext;
-    }
-
-    @Override
-    public OperationContext getOperationContext() {
-        return operationContext;
-    }
-}
diff --git a/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java
index 3a87434e9ed..1c7f3ef9858 100644
--- a/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java
+++ b/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java
@@ -17,6 +17,9 @@
 package com.mongodb.internal.client.model;
 
 import com.mongodb.CursorType;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonString;
@@ -54,6 +57,7 @@ public final class FindOptions {
     private boolean returnKey;
     private boolean showRecordId;
     private Boolean allowDiskUse;
+    private TimeoutMode timeoutMode;
 
     /**
      * Construct a new instance.
@@ -66,7 +70,8 @@ public FindOptions() {
             final int batchSize, final int limit, final Bson projection, final long maxTimeMS, final long maxAwaitTimeMS, final int skip,
             final Bson sort, final CursorType cursorType, final boolean noCursorTimeout, final boolean partial,
             final Collation collation, final BsonValue comment, final Bson hint, final String hintString, final Bson variables,
-            final Bson max, final Bson min, final boolean returnKey, final boolean showRecordId, final Boolean allowDiskUse) {
+            final Bson max, final Bson min, final boolean returnKey, final boolean showRecordId, final Boolean allowDiskUse,
+            final TimeoutMode timeoutMode) {
         this.batchSize = batchSize;
         this.limit = limit;
         this.projection = projection;
@@ -87,12 +92,13 @@ public FindOptions() {
         this.returnKey = returnKey;
         this.showRecordId = showRecordId;
         this.allowDiskUse = allowDiskUse;
+        this.timeoutMode = timeoutMode;
     }
     //CHECKSTYLE:ON
 
     public FindOptions withBatchSize(final int batchSize) {
         return new FindOptions(batchSize, limit, projection, maxTimeMS, maxAwaitTimeMS, skip, sort, cursorType, noCursorTimeout,
-                partial, collation, comment, hint, hintString, variables, max, min, returnKey, showRecordId, allowDiskUse);
+                partial, collation, comment, hint, hintString, variables, max, min, returnKey, showRecordId, allowDiskUse, timeoutMode);
     }
 
     /**
@@ -224,6 +230,41 @@ public FindOptions batchSize(final int batchSize) {
         return this;
     }
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@code MongoDatabase} or via {@code MongoCollection}
+     * </p>
+     * <p>
+     *     If the {@code timeout} is set then:
+     *     <ul>
+     *      <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *      <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *      to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     </ul>
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public FindOptions timeoutMode(final TimeoutMode timeoutMode) {
+        this.timeoutMode = timeoutMode;
+        return this;
+    }
+
+    /**
+     * @see #timeoutMode(TimeoutMode)
+     * @return timeout mode
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public TimeoutMode getTimeoutMode() {
+        return timeoutMode;
+    }
+
     /**
      * Gets a document describing the fields to return for all matching documents.
      *
diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java
index e1d7d6946cb..137a2f266e3 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java
@@ -24,8 +24,10 @@
 import com.mongodb.connection.ClusterType;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.event.ServerDescriptionChangedEvent;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.types.ObjectId;
 
@@ -125,7 +127,8 @@ public void close() {
     }
 
     @Override
-    public ServersSnapshot getServersSnapshot() {
+    public ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout,
+                                              final TimeoutContext timeoutContext) {
         isTrue("is open", !isClosed());
         Map<ServerAddress, ServerTuple> nonAtomicSnapshot = new HashMap<>(addressToServerTupleMap);
         return serverAddress -> {
diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java
new file mode 100644
index 00000000000..ba200933860
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.internal.session.SessionContext;
+
+import static com.mongodb.internal.ExceptionUtils.isMongoSocketException;
+import static com.mongodb.internal.ExceptionUtils.isOperationTimeoutFromSocketException;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+public abstract class AbstractProtocolExecutor implements ProtocolExecutor {
+
+    protected boolean shouldMarkSessionDirty(final Throwable e, final SessionContext sessionContext) {
+        if (!sessionContext.hasSession()) {
+            return false;
+        }
+        return isMongoSocketException(e) || isOperationTimeoutFromSocketException(e);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java
index 0ba1985b4b0..2891bc28732 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java
@@ -20,7 +20,6 @@
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.binding.BindingContext;
 import com.mongodb.internal.binding.ReferenceCounted;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
@@ -46,12 +45,12 @@ public interface AsyncConnection extends ReferenceCounted {
     ConnectionDescription getDescription();
 
     <T> void commandAsync(String database, BsonDocument command, FieldNameValidator fieldNameValidator,
-            @Nullable ReadPreference readPreference, Decoder<T> commandResultDecoder, BindingContext context,
+            @Nullable ReadPreference readPreference, Decoder<T> commandResultDecoder, OperationContext operationContext,
             SingleResultCallback<T> callback);
 
     <T> void commandAsync(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator,
             @Nullable ReadPreference readPreference, Decoder<T> commandResultDecoder,
-            BindingContext context, boolean responseExpected, @Nullable SplittablePayload payload,
+            OperationContext operationContext, boolean responseExpected, @Nullable SplittablePayload payload,
             @Nullable FieldNameValidator payloadFieldNameValidator, SingleResultCallback<T> callback);
 
     void markAsPinned(Connection.PinningMode pinningMode);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java
index 6f2b7e5c172..bbb18497ee4 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java
@@ -20,6 +20,7 @@
 import com.mongodb.MongoInternalException;
 import com.mongodb.MongoSocketReadException;
 import com.mongodb.MongoSocketReadTimeoutException;
+import com.mongodb.MongoSocketWriteTimeoutException;
 import com.mongodb.ServerAddress;
 import com.mongodb.connection.AsyncCompletionHandler;
 import com.mongodb.connection.SocketSettings;
@@ -86,14 +87,15 @@ protected void setChannel(final ExtendedAsynchronousByteChannel channel) {
     }
 
     @Override
-    public void writeAsync(final List<ByteBuf> buffers, final AsyncCompletionHandler<Void> handler) {
+    public void writeAsync(final List<ByteBuf> buffers, final OperationContext operationContext,
+            final AsyncCompletionHandler<Void> handler) {
         AsyncWritableByteChannelAdapter byteChannel = new AsyncWritableByteChannelAdapter();
         Iterator<ByteBuf> iter = buffers.iterator();
-        pipeOneBuffer(byteChannel, iter.next(), new AsyncCompletionHandler<Void>() {
+        pipeOneBuffer(byteChannel, iter.next(), operationContext, new AsyncCompletionHandler<Void>() {
             @Override
             public void completed(@Nullable final Void t) {
                 if (iter.hasNext()) {
-                    pipeOneBuffer(byteChannel, iter.next(), this);
+                    pipeOneBuffer(byteChannel, iter.next(), operationContext, this);
                 } else {
                     handler.completed(null);
                 }
@@ -107,46 +109,31 @@ public void failed(final Throwable t) {
     }
 
     @Override
-    public void readAsync(final int numBytes, final AsyncCompletionHandler<ByteBuf> handler) {
-        readAsync(numBytes, 0, handler);
-    }
-
-    private void readAsync(final int numBytes, final int additionalTimeout, final AsyncCompletionHandler<ByteBuf> handler) {
+    public void readAsync(final int numBytes, final OperationContext operationContext, final AsyncCompletionHandler<ByteBuf> handler) {
         ByteBuf buffer = bufferProvider.getBuffer(numBytes);
 
-        int timeout = settings.getReadTimeout(MILLISECONDS);
-        if (timeout > 0 && additionalTimeout > 0) {
-            timeout += additionalTimeout;
-        }
-
-        getChannel().read(buffer.asNIO(), timeout, MILLISECONDS, null, new BasicCompletionHandler(buffer, handler));
+        long timeout = operationContext.getTimeoutContext().getReadTimeoutMS();
+        getChannel().read(buffer.asNIO(), timeout, MILLISECONDS, null, new BasicCompletionHandler(buffer, operationContext, handler));
     }
 
     @Override
-    public void open() throws IOException {
+    public void open(final OperationContext operationContext) throws IOException {
         FutureAsyncCompletionHandler<Void> handler = new FutureAsyncCompletionHandler<>();
-        openAsync(handler);
+        openAsync(operationContext, handler);
         handler.getOpen();
     }
 
     @Override
-    public void write(final List<ByteBuf> buffers) throws IOException {
+    public void write(final List<ByteBuf> buffers, final OperationContext operationContext) throws IOException {
         FutureAsyncCompletionHandler<Void> handler = new FutureAsyncCompletionHandler<>();
-        writeAsync(buffers, handler);
+        writeAsync(buffers, operationContext, handler);
         handler.getWrite();
     }
 
     @Override
-    public ByteBuf read(final int numBytes) throws IOException {
+    public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException {
         FutureAsyncCompletionHandler<ByteBuf> handler = new FutureAsyncCompletionHandler<>();
-        readAsync(numBytes, handler);
-        return handler.getRead();
-    }
-
-    @Override
-    public ByteBuf read(final int numBytes, final int additionalTimeout) throws IOException {
-        FutureAsyncCompletionHandler<ByteBuf> handler = new FutureAsyncCompletionHandler<>();
-        readAsync(numBytes, additionalTimeout, handler);
+        readAsync(numBytes, operationContext, handler);
         return handler.getRead();
     }
 
@@ -182,12 +169,12 @@ public ByteBuf getBuffer(final int size) {
     }
 
     private void pipeOneBuffer(final AsyncWritableByteChannelAdapter byteChannel, final ByteBuf byteBuffer,
-                               final AsyncCompletionHandler<Void> outerHandler) {
-        byteChannel.write(byteBuffer.asNIO(), new AsyncCompletionHandler<Void>() {
+            final OperationContext operationContext, final AsyncCompletionHandler<Void> outerHandler) {
+        byteChannel.write(byteBuffer.asNIO(), operationContext, new AsyncCompletionHandler<Void>() {
             @Override
             public void completed(@Nullable final Void t) {
                 if (byteBuffer.hasRemaining()) {
-                    byteChannel.write(byteBuffer.asNIO(), this);
+                    byteChannel.write(byteBuffer.asNIO(), operationContext, this);
                 } else {
                     outerHandler.completed(null);
                 }
@@ -201,8 +188,9 @@ public void failed(final Throwable t) {
     }
 
     private class AsyncWritableByteChannelAdapter {
-        void write(final ByteBuffer src, final AsyncCompletionHandler<Void> handler) {
-            getChannel().write(src, null, new AsyncWritableByteChannelAdapter.WriteCompletionHandler(handler));
+        void write(final ByteBuffer src, final OperationContext operationContext, final AsyncCompletionHandler<Void> handler) {
+            getChannel().write(src, operationContext.getTimeoutContext().getWriteTimeoutMS(), MILLISECONDS, null,
+                    new AsyncWritableByteChannelAdapter.WriteCompletionHandler(handler));
         }
 
         private class WriteCompletionHandler extends BaseCompletionHandler<Void, Integer, Object> {
@@ -218,19 +206,26 @@ public void completed(final Integer result, final Object attachment) {
             }
 
             @Override
-            public void failed(final Throwable exc, final Object attachment) {
+            public void failed(final Throwable t, final Object attachment) {
                 AsyncCompletionHandler<Void> localHandler = getHandlerAndClear();
-                localHandler.failed(exc);
+                if (t instanceof InterruptedByTimeoutException) {
+                    localHandler.failed(new MongoSocketWriteTimeoutException("Timeout while writing message", serverAddress, t));
+                } else {
+                    localHandler.failed(t);
+                }
             }
         }
     }
 
     private final class BasicCompletionHandler extends BaseCompletionHandler<ByteBuf, Integer, Void> {
         private final AtomicReference<ByteBuf> byteBufReference;
+        private final OperationContext operationContext;
 
-        private BasicCompletionHandler(final ByteBuf dst, final AsyncCompletionHandler<ByteBuf> handler) {
+        private BasicCompletionHandler(final ByteBuf dst, final OperationContext operationContext,
+                final AsyncCompletionHandler<ByteBuf> handler) {
             super(handler);
             this.byteBufReference = new AtomicReference<>(dst);
+            this.operationContext = operationContext;
         }
 
         @Override
@@ -244,8 +239,8 @@ public void completed(final Integer result, final Void attachment) {
                 localByteBuf.flip();
                 localHandler.completed(localByteBuf);
             } else {
-                getChannel().read(localByteBuf.asNIO(), settings.getReadTimeout(MILLISECONDS), MILLISECONDS, null,
-                        new BasicCompletionHandler(localByteBuf, localHandler));
+                getChannel().read(localByteBuf.asNIO(), operationContext.getTimeoutContext().getReadTimeoutMS(), MILLISECONDS, null,
+                        new BasicCompletionHandler(localByteBuf, operationContext, localHandler));
             }
         }
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java
index cb1e2a54868..4818b1f7ac4 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java
@@ -56,7 +56,7 @@ public AsynchronousSocketChannelStream(final ServerAddress serverAddress, final
     }
 
     @Override
-    public void openAsync(final AsyncCompletionHandler<Void> handler) {
+    public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler<Void> handler) {
         isTrue("unopened", getChannel() == null);
         Queue<SocketAddress> socketAddressQueue;
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java b/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java
index 232eeb45049..cd1809966b0 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java
@@ -96,19 +96,20 @@ <T> T getNonNullMechanismProperty(final String key, @Nullable final T defaultVal
 
     }
 
-    abstract void authenticate(InternalConnection connection, ConnectionDescription connectionDescription);
+    abstract void authenticate(InternalConnection connection, ConnectionDescription connectionDescription,
+            OperationContext operationContext);
 
     abstract void authenticateAsync(InternalConnection connection, ConnectionDescription connectionDescription,
-                                    SingleResultCallback<Void> callback);
+            OperationContext operationContext, SingleResultCallback<Void> callback);
 
-    public void reauthenticate(final InternalConnection connection) {
-        authenticate(connection, connection.getDescription());
+    public void reauthenticate(final InternalConnection connection, final OperationContext operationContext) {
+        authenticate(connection, connection.getDescription(), operationContext);
     }
 
-    public void reauthenticateAsync(final InternalConnection connection, final SingleResultCallback<Void> callback) {
+    public void reauthenticateAsync(final InternalConnection connection, final OperationContext operationContext,
+                                    final SingleResultCallback<Void> callback) {
         beginAsync().thenRun((c) -> {
-            authenticateAsync(connection, connection.getDescription(), c);
+            authenticateAsync(connection, connection.getDescription(), operationContext, c);
         }).finish(callback);
     }
-
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java
index 292822244b7..df3e4d1c1fe 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java
@@ -19,6 +19,8 @@
 import com.mongodb.MongoClientException;
 import com.mongodb.MongoException;
 import com.mongodb.MongoIncompatibleDriverException;
+import com.mongodb.MongoInterruptedException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoTimeoutException;
 import com.mongodb.ServerAddress;
 import com.mongodb.UnixServerAddress;
@@ -31,6 +33,7 @@
 import com.mongodb.event.ClusterDescriptionChangedEvent;
 import com.mongodb.event.ClusterListener;
 import com.mongodb.event.ClusterOpeningEvent;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.connection.OperationContext.ServerDeprioritization;
@@ -42,6 +45,7 @@
 import com.mongodb.internal.selector.AtMostTwoRandomServerSelector;
 import com.mongodb.internal.selector.LatencyMinimizingServerSelector;
 import com.mongodb.internal.selector.MinimumOperationCountServerSelector;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.selector.CompositeServerSelector;
 import com.mongodb.selector.ServerSelector;
@@ -78,7 +82,7 @@
 import static com.mongodb.internal.logging.LogMessage.Entry.Name.TOPOLOGY_DESCRIPTION;
 import static com.mongodb.internal.logging.LogMessage.Level.DEBUG;
 import static com.mongodb.internal.logging.LogMessage.Level.INFO;
-import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED;
 import static java.lang.String.format;
 import static java.util.Arrays.asList;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -121,58 +125,46 @@ public ClusterClock getClock() {
     public ServerTuple selectServer(final ServerSelector serverSelector, final OperationContext operationContext) {
         isTrue("open", !isClosed());
 
-        try {
-            CountDownLatch currentPhase = phase.get();
-            ClusterDescription curDescription = description;
-            logServerSelectionStarted(clusterId, operationContext, serverSelector, curDescription);
-            ServerDeprioritization serverDeprioritization = operationContext.getServerDeprioritization();
-            ServerTuple serverTuple = createCompleteSelectorAndSelectServer(serverSelector, curDescription, serverDeprioritization);
-
-            boolean selectionWaitingLogged = false;
-
-            long startTimeNanos = System.nanoTime();
-            long curTimeNanos = startTimeNanos;
-            Long maxWaitTimeNanos = getMaxWaitTimeNanos();
-
-            while (true) {
-                if (!curDescription.isCompatibleWithDriver()) {
-                    throw createAndLogIncompatibleException(operationContext, serverSelector, curDescription);
-                }
-
-                if (serverTuple != null) {
-                    ServerAddress serverAddress = serverTuple.getServerDescription().getAddress();
-                    logServerSelectionSucceeded(
-                            clusterId, operationContext, serverAddress, serverSelector, curDescription);
-                    serverDeprioritization.updateCandidate(serverAddress);
-                    return serverTuple;
-                }
-
-                Long remainingTimeNanos = maxWaitTimeNanos == null ? null : maxWaitTimeNanos - (curTimeNanos - startTimeNanos);
-
-                if (remainingTimeNanos != null && remainingTimeNanos <= 0) {
-                    throw createAndLogTimeoutException(operationContext, serverSelector, curDescription);
-                }
-
-                if (!selectionWaitingLogged) {
-                    logServerSelectionWaiting(clusterId, operationContext, remainingTimeNanos, serverSelector, curDescription);
-                    selectionWaitingLogged = true;
-                }
-
-                connect();
-
-                currentPhase.await(
-                        remainingTimeNanos == null ? getMinWaitTimeNanos() : Math.min(remainingTimeNanos, getMinWaitTimeNanos()),
-                        NANOSECONDS);
-
-                curTimeNanos = System.nanoTime();
+        ServerDeprioritization serverDeprioritization = operationContext.getServerDeprioritization();
+        boolean selectionWaitingLogged = false;
+        Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout();
+        logServerSelectionStarted(clusterId, operationContext.getId(), serverSelector, description);
+        while (true) {
+            CountDownLatch currentPhaseLatch = phase.get();
+            ClusterDescription currentDescription = description;
+            ServerTuple serverTuple = createCompleteSelectorAndSelectServer(
+                    serverSelector, currentDescription, serverDeprioritization,
+                    computedServerSelectionTimeout, operationContext.getTimeoutContext());
+
+            if (!currentDescription.isCompatibleWithDriver()) {
+                logAndThrowIncompatibleException(operationContext.getId(), serverSelector, currentDescription);
+            }
+            if (serverTuple != null) {
+                ServerAddress serverAddress = serverTuple.getServerDescription().getAddress();
+                logServerSelectionSucceeded(
+                        clusterId,
+                        operationContext.getId(),
+                        serverAddress,
+                        serverSelector,
+                        currentDescription);
+                serverDeprioritization.updateCandidate(serverAddress);
+                return serverTuple;
+            }
+            computedServerSelectionTimeout.onExpired(() ->
+                    logAndThrowTimeoutException(operationContext, serverSelector, currentDescription));
 
-                currentPhase = phase.get();
-                curDescription = description;
-                serverTuple = createCompleteSelectorAndSelectServer(serverSelector, curDescription, serverDeprioritization);
+            if (!selectionWaitingLogged) {
+                logServerSelectionWaiting(clusterId, operationContext.getId(), computedServerSelectionTimeout, serverSelector, currentDescription);
+                selectionWaitingLogged = true;
             }
+            connect();
+
+            Timeout heartbeatLimitedTimeout = Timeout.earliest(
+                    computedServerSelectionTimeout,
+                    startMinWaitHeartbeatTimeout());
 
-        } catch (InterruptedException e) {
-            throw interruptAndCreateMongoInterruptedException(format("Interrupted while waiting for a server that matches %s", serverSelector), e);
+            heartbeatLimitedTimeout.awaitOn(currentPhaseLatch,
+                    () -> format("waiting for a server that matches %s", serverSelector));
         }
     }
 
@@ -181,11 +173,18 @@ public void selectServerAsync(final ServerSelector serverSelector, final Operati
             final SingleResultCallback<ServerTuple> callback) {
         isTrue("open", !isClosed());
 
+        Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout();
+        ServerSelectionRequest request = new ServerSelectionRequest(
+                serverSelector, operationContext, computedServerSelectionTimeout, callback);
+
         CountDownLatch currentPhase = phase.get();
         ClusterDescription currentDescription = description;
 
-        logServerSelectionStarted(clusterId, operationContext, serverSelector, currentDescription);
-        ServerSelectionRequest request = new ServerSelectionRequest(operationContext, serverSelector, getMaxWaitTimeNanos(), callback);
+        logServerSelectionStarted(
+                clusterId,
+                operationContext.getId(),
+                serverSelector,
+                currentDescription);
 
         if (!handleServerSelectionRequest(request, currentPhase, currentDescription)) {
             notifyWaitQueueHandler(request);
@@ -257,50 +256,60 @@ private void updatePhase() {
         withLock(() -> phase.getAndSet(new CountDownLatch(1)).countDown());
     }
 
-    @Nullable
-    private Long getMaxWaitTimeNanos() {
-        if (settings.getServerSelectionTimeout(NANOSECONDS) < 0) {
-            return null;
-        }
-        return settings.getServerSelectionTimeout(NANOSECONDS);
+    private Timeout startMinWaitHeartbeatTimeout() {
+        long minHeartbeatFrequency = serverFactory.getSettings().getMinHeartbeatFrequency(NANOSECONDS);
+        minHeartbeatFrequency = Math.max(0, minHeartbeatFrequency);
+        return Timeout.expiresIn(minHeartbeatFrequency, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
     }
 
-    private long getMinWaitTimeNanos() {
-        return serverFactory.getSettings().getMinHeartbeatFrequency(NANOSECONDS);
-    }
+    private boolean handleServerSelectionRequest(
+            final ServerSelectionRequest request, final CountDownLatch currentPhase,
+            final ClusterDescription description) {
 
-    private boolean handleServerSelectionRequest(final ServerSelectionRequest request, final CountDownLatch currentPhase,
-                                                 final ClusterDescription description) {
         try {
+            OperationContext operationContext = request.getOperationContext();
+            long operationId = operationContext.getId();
             if (currentPhase != request.phase) {
                 CountDownLatch prevPhase = request.phase;
                 request.phase = currentPhase;
                 if (!description.isCompatibleWithDriver()) {
-                    request.onResult(null, createAndLogIncompatibleException(request.operationContext, request.originalSelector, description));
-                    return true;
+                    logAndThrowIncompatibleException(operationId, request.originalSelector, description);
                 }
 
+
                 ServerDeprioritization serverDeprioritization = request.operationContext.getServerDeprioritization();
-                ServerTuple serverTuple = createCompleteSelectorAndSelectServer(request.originalSelector, description, serverDeprioritization);
+                ServerTuple serverTuple = createCompleteSelectorAndSelectServer(
+                        request.originalSelector,
+                        description,
+                        serverDeprioritization,
+                        request.getTimeout(),
+                        operationContext.getTimeoutContext());
+
                 if (serverTuple != null) {
                     ServerAddress serverAddress = serverTuple.getServerDescription().getAddress();
-                    logServerSelectionSucceeded(clusterId, request.operationContext, serverAddress,
-                            request.originalSelector, description);
+                    logServerSelectionSucceeded(
+                            clusterId,
+                            operationId,
+                            serverAddress,
+                            request.originalSelector,
+                            description);
                     serverDeprioritization.updateCandidate(serverAddress);
                     request.onResult(serverTuple, null);
                     return true;
                 }
                 if (prevPhase == null) {
                     logServerSelectionWaiting(
-                            clusterId, request.operationContext, request.getRemainingTime(), request.originalSelector, description);
+                            clusterId,
+                            operationId,
+                            request.getTimeout(),
+                            request.originalSelector,
+                            description);
                 }
             }
 
-            if (request.timedOut()) {
-                request.onResult(null, createAndLogTimeoutException(request.operationContext, request.originalSelector, description));
-                return true;
-            }
-
+            Timeout.onExistsAndExpired(request.getTimeout(), () -> {
+                logAndThrowTimeoutException(operationContext, request.originalSelector, description);
+            });
             return false;
         } catch (Exception e) {
             request.onResult(null, e);
@@ -312,9 +321,15 @@ private boolean handleServerSelectionRequest(final ServerSelectionRequest reques
     private ServerTuple createCompleteSelectorAndSelectServer(
             final ServerSelector serverSelector,
             final ClusterDescription clusterDescription,
-            final ServerDeprioritization serverDeprioritization) {
+            final ServerDeprioritization serverDeprioritization,
+            final Timeout serverSelectionTimeout,
+            final TimeoutContext timeoutContext) {
         return createCompleteSelectorAndSelectServer(
-                serverSelector, clusterDescription, getServersSnapshot(), serverDeprioritization, settings);
+                serverSelector,
+                clusterDescription,
+                getServersSnapshot(serverSelectionTimeout, timeoutContext),
+                serverDeprioritization,
+                settings);
     }
 
     @Nullable
@@ -372,13 +387,13 @@ protected ClusterableServer createServer(final ServerAddress serverAddress) {
         return serverFactory.create(this, serverAddress);
     }
 
-    private MongoIncompatibleDriverException createAndLogIncompatibleException(
-            final OperationContext operationContext,
+    private void logAndThrowIncompatibleException(
+            final long operationId,
             final ServerSelector serverSelector,
             final ClusterDescription clusterDescription) {
         MongoIncompatibleDriverException exception = createIncompatibleException(clusterDescription);
-        logServerSelectionFailed(clusterId, operationContext, exception, serverSelector, clusterDescription);
-        return exception;
+        logServerSelectionFailed(clusterId, operationId, exception, serverSelector, clusterDescription);
+        throw exception;
     }
 
     private MongoIncompatibleDriverException createIncompatibleException(final ClusterDescription curDescription) {
@@ -400,34 +415,36 @@ private MongoIncompatibleDriverException createIncompatibleException(final Clust
         return new MongoIncompatibleDriverException(message, curDescription);
     }
 
-    private MongoException createAndLogTimeoutException(
+    private void logAndThrowTimeoutException(
             final OperationContext operationContext,
             final ServerSelector serverSelector,
             final ClusterDescription clusterDescription) {
-        MongoTimeoutException exception = new MongoTimeoutException(format(
+        String message = format(
                 "Timed out while waiting for a server that matches %s. Client view of cluster state is %s",
-                serverSelector, clusterDescription.getShortDescription()));
-        logServerSelectionFailed(clusterId, operationContext, exception, serverSelector, clusterDescription);
-        return exception;
+                serverSelector, clusterDescription.getShortDescription());
+
+        MongoTimeoutException exception = operationContext.getTimeoutContext().hasTimeoutMS()
+                ? new MongoOperationTimeoutException(message) : new MongoTimeoutException(message);
+
+        logServerSelectionFailed(clusterId, operationContext.getId(), exception, serverSelector, clusterDescription);
+        throw exception;
     }
 
     private static final class ServerSelectionRequest {
-        private final OperationContext operationContext;
         private final ServerSelector originalSelector;
-        @Nullable
-        private final Long maxWaitTimeNanos;
         private final SingleResultCallback<ServerTuple> callback;
-        private final long startTimeNanos = System.nanoTime();
+        private final OperationContext operationContext;
+        private final Timeout timeout;
         private CountDownLatch phase;
 
-        ServerSelectionRequest(final OperationContext operationContext,
-                               final ServerSelector serverSelector,
-                               @Nullable
-                               final Long maxWaitTimeNanos,
-                               final SingleResultCallback<ServerTuple> callback) {
-            this.operationContext = operationContext;
+        ServerSelectionRequest(
+                final ServerSelector serverSelector,
+                final OperationContext operationContext,
+                final Timeout timeout,
+                final SingleResultCallback<ServerTuple> callback) {
             this.originalSelector = serverSelector;
-            this.maxWaitTimeNanos = maxWaitTimeNanos;
+            this.operationContext = operationContext;
+            this.timeout = timeout;
             this.callback = callback;
         }
 
@@ -439,14 +456,12 @@ void onResult(@Nullable final ServerTuple serverTuple, @Nullable final Throwable
             }
         }
 
-        boolean timedOut() {
-            Long remainingTimeNanos = getRemainingTime();
-            return remainingTimeNanos != null && remainingTimeNanos <= 0;
+        Timeout getTimeout() {
+            return timeout;
         }
 
-        @Nullable
-        Long getRemainingTime() {
-            return maxWaitTimeNanos == null ? null : maxWaitTimeNanos - (System.nanoTime() - startTimeNanos);
+        OperationContext getOperationContext() {
+            return operationContext;
         }
     }
 
@@ -477,31 +492,37 @@ private void stopWaitQueueHandler() {
     }
 
     private final class WaitQueueHandler implements Runnable {
+
+        WaitQueueHandler() {
+        }
+
         public void run() {
             while (!isClosed) {
                 CountDownLatch currentPhase = phase.get();
                 ClusterDescription curDescription = description;
-                long waitTimeNanos = Long.MAX_VALUE;
 
+                Timeout timeout = Timeout.infinite();
+                boolean someWaitersNotSatisfied = false;
                 for (Iterator<ServerSelectionRequest> iter = waitQueue.iterator(); iter.hasNext();) {
-                    ServerSelectionRequest nextRequest = iter.next();
-                    if (handleServerSelectionRequest(nextRequest, currentPhase, curDescription)) {
+                    ServerSelectionRequest currentRequest = iter.next();
+                    if (handleServerSelectionRequest(currentRequest, currentPhase, curDescription)) {
                         iter.remove();
                     } else {
-                        Long remainingTimeNanos = nextRequest.getRemainingTime();
-                        long minWaitTimeNanos = Math.min(getMinWaitTimeNanos(), waitTimeNanos);
-                        waitTimeNanos = remainingTimeNanos == null ? minWaitTimeNanos : Math.min(remainingTimeNanos, minWaitTimeNanos);
+                        someWaitersNotSatisfied = true;
+                        timeout = Timeout.earliest(
+                                timeout,
+                                currentRequest.getTimeout(),
+                                startMinWaitHeartbeatTimeout());
                     }
                 }
 
-                // if there are any waiters that were not satisfied, connect
-                if (waitTimeNanos < Long.MAX_VALUE) {
+                if (someWaitersNotSatisfied) {
                     connect();
                 }
 
                 try {
-                    currentPhase.await(waitTimeNanos, NANOSECONDS);
-                } catch (InterruptedException closed) {
+                    timeout.awaitOn(currentPhase, () -> "ignored");
+                } catch (MongoInterruptedException closed) {
                     // The cluster has been closed and the while loop will exit.
                 }
             }
@@ -515,7 +536,7 @@ public void run() {
 
     static void logServerSelectionStarted(
             final ClusterId clusterId,
-            final OperationContext operationContext,
+            final long operationId,
             final ServerSelector serverSelector,
             final ClusterDescription clusterDescription) {
         if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) {
@@ -523,7 +544,7 @@ static void logServerSelectionStarted(
                     SERVER_SELECTION, DEBUG, "Server selection started", clusterId,
                     asList(
                             new Entry(OPERATION, null),
-                            new Entry(OPERATION_ID, operationContext.getId()),
+                            new Entry(OPERATION_ID, operationId),
                             new Entry(SELECTOR, serverSelector.toString()),
                             new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())),
                     "Server selection started for operation[ {}] with ID {}. Selector: {}, topology description: {}"));
@@ -532,9 +553,8 @@ static void logServerSelectionStarted(
 
     private static void logServerSelectionWaiting(
             final ClusterId clusterId,
-            final OperationContext operationContext,
-            @Nullable
-            final Long remainingTimeNanos,
+            final long operationId,
+            final Timeout timeout,
             final ServerSelector serverSelector,
             final ClusterDescription clusterDescription) {
         if (STRUCTURED_LOGGER.isRequired(INFO, clusterId)) {
@@ -542,8 +562,11 @@ private static void logServerSelectionWaiting(
                     SERVER_SELECTION, INFO, "Waiting for suitable server to become available", clusterId,
                     asList(
                             new Entry(OPERATION, null),
-                            new Entry(OPERATION_ID, operationContext.getId()),
-                            new Entry(REMAINING_TIME_MS, remainingTimeNanos == null ? null : NANOSECONDS.toMillis(remainingTimeNanos)),
+                            new Entry(OPERATION_ID, operationId),
+                            timeout.call(MILLISECONDS,
+                                    () -> new Entry(REMAINING_TIME_MS, "infinite"),
+                                    (ms) -> new Entry(REMAINING_TIME_MS, ms),
+                                    () -> new Entry(REMAINING_TIME_MS, 0L)),
                             new Entry(SELECTOR, serverSelector.toString()),
                             new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())),
                     "Waiting for server to become available for operation[ {}] with ID {}.[ Remaining time: {} ms.]"
@@ -553,7 +576,7 @@ private static void logServerSelectionWaiting(
 
     private static void logServerSelectionFailed(
             final ClusterId clusterId,
-            final OperationContext operationContext,
+            final long operationId,
             final MongoException failure,
             final ServerSelector serverSelector,
             final ClusterDescription clusterDescription) {
@@ -568,7 +591,7 @@ private static void logServerSelectionFailed(
                     SERVER_SELECTION, DEBUG, "Server selection failed", clusterId,
                     asList(
                             new Entry(OPERATION, null),
-                            new Entry(OPERATION_ID, operationContext.getId()),
+                            new Entry(OPERATION_ID, operationId),
                             new Entry(FAILURE, failureDescription),
                             new Entry(SELECTOR, serverSelector.toString()),
                             new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())),
@@ -578,7 +601,7 @@ private static void logServerSelectionFailed(
 
     static void logServerSelectionSucceeded(
             final ClusterId clusterId,
-            final OperationContext operationContext,
+            final long operationId,
             final ServerAddress serverAddress,
             final ServerSelector serverSelector,
             final ClusterDescription clusterDescription) {
@@ -587,7 +610,7 @@ static void logServerSelectionSucceeded(
                     SERVER_SELECTION, DEBUG, "Server selection succeeded", clusterId,
                     asList(
                             new Entry(OPERATION, null),
-                            new Entry(OPERATION_ID, operationContext.getId()),
+                            new Entry(OPERATION_ID, operationId),
                             new Entry(SERVER_HOST, serverAddress.getHost()),
                             new Entry(SERVER_PORT, serverAddress instanceof UnixServerAddress ? null : serverAddress.getPort()),
                             new Entry(SELECTOR, serverSelector.toString()),
diff --git a/driver-core/src/main/com/mongodb/internal/connection/Cluster.java b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java
index 358eb90a175..a6d4a026608 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/Cluster.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java
@@ -19,11 +19,13 @@
 
 import com.mongodb.ServerAddress;
 import com.mongodb.annotations.ThreadSafe;
+import com.mongodb.connection.ClusterDescription;
 import com.mongodb.connection.ClusterId;
+import com.mongodb.connection.ClusterSettings;
 import com.mongodb.event.ServerDescriptionChangedEvent;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.connection.ClusterDescription;
-import com.mongodb.connection.ClusterSettings;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.selector.ServerSelector;
 
@@ -41,7 +43,7 @@ public interface Cluster extends Closeable {
 
     ClusterId getClusterId();
 
-    ServersSnapshot getServersSnapshot();
+    ServersSnapshot getServersSnapshot(Timeout serverSelectionTimeout, TimeoutContext timeoutContext);
 
     /**
      * Get the current description of this cluster.
diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java
index dc0df6ac27e..31737d7b22b 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java
@@ -20,7 +20,6 @@
 import com.mongodb.MongoServerException;
 import com.mongodb.ServerApi;
 import com.mongodb.connection.ClusterConnectionMode;
-import com.mongodb.internal.IgnorableRequestContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.validator.NoOpFieldNameValidator;
 import com.mongodb.lang.Nullable;
@@ -44,27 +43,30 @@ public final class CommandHelper {
     static final String LEGACY_HELLO_LOWER = LEGACY_HELLO.toLowerCase(Locale.ROOT);
 
     static BsonDocument executeCommand(final String database, final BsonDocument command, final ClusterConnectionMode clusterConnectionMode,
-                                       @Nullable final ServerApi serverApi, final InternalConnection internalConnection) {
-        return sendAndReceive(database, command, clusterConnectionMode, serverApi, internalConnection);
+            @Nullable final ServerApi serverApi, final InternalConnection internalConnection, final OperationContext operationContext) {
+        return sendAndReceive(database, command, clusterConnectionMode, serverApi, internalConnection, operationContext);
     }
 
     static BsonDocument executeCommandWithoutCheckingForFailure(final String database, final BsonDocument command,
-                                                                final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi,
-                                                                final InternalConnection internalConnection) {
+            final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi,
+            final InternalConnection internalConnection, final OperationContext operationContext) {
         try {
-            return sendAndReceive(database, command, clusterConnectionMode, serverApi, internalConnection);
+            return executeCommand(database, command, clusterConnectionMode, serverApi, internalConnection, operationContext);
         } catch (MongoServerException e) {
             return new BsonDocument();
         }
     }
 
-    static void executeCommandAsync(final String database, final BsonDocument command, final ClusterConnectionMode clusterConnectionMode,
-                                    @Nullable final ServerApi serverApi, final InternalConnection internalConnection,
+    static void executeCommandAsync(final String database,
+                                    final BsonDocument command,
+                                    final ClusterConnectionMode clusterConnectionMode,
+                                    @Nullable final ServerApi serverApi,
+                                    final InternalConnection internalConnection,
+                                    final OperationContext operationContext,
                                     final SingleResultCallback<BsonDocument> callback) {
         internalConnection.sendAndReceiveAsync(
                 getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi),
-                new BsonDocumentCodec(),
-                NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext(), (result, t) -> {
+                new BsonDocumentCodec(), operationContext, (result, t) -> {
                     if (t != null) {
                         callback.onResult(null, t);
                     } else {
@@ -88,11 +90,15 @@ static boolean isCommandOk(final BsonDocument response) {
     }
 
     private static BsonDocument sendAndReceive(final String database, final BsonDocument command,
-                                               final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi,
-                                               final InternalConnection internalConnection) {
-        return assertNotNull(internalConnection.sendAndReceive(getCommandMessage(database, command, internalConnection,
-                        clusterConnectionMode, serverApi), new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext()));
+                                               final ClusterConnectionMode clusterConnectionMode,
+                                               @Nullable final ServerApi serverApi,
+                                               final InternalConnection internalConnection,
+                                               final OperationContext operationContext) {
+        return assertNotNull(
+                internalConnection.sendAndReceive(
+                        getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi),
+                        new BsonDocumentCodec(), operationContext)
+        );
     }
 
     private static CommandMessage getCommandMessage(final String database, final BsonDocument command,
@@ -106,6 +112,7 @@ private static CommandMessage getCommandMessage(final String database, final Bso
                          // which means OP_MSG will not be used
                         .maxWireVersion(internalConnection.getDescription().getMaxWireVersion())
                         .serverType(internalConnection.getDescription().getServerType())
+                        .cryptd(internalConnection.getInitialServerDescription().isCryptd())
                         .build(),
                 clusterConnectionMode, serverApi);
     }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java
index 24b30d60acb..53d869a6b8f 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java
@@ -21,6 +21,7 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.ServerApi;
 import com.mongodb.connection.ClusterConnectionMode;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonArray;
@@ -142,7 +143,7 @@ MongoNamespace getNamespace() {
     }
 
     @Override
-    protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final SessionContext sessionContext) {
+    protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final OperationContext operationContext) {
         int messageStartPosition = bsonOutput.getPosition() - MESSAGE_PROLOGUE_LENGTH;
         int commandStartPosition;
         if (useOpMsg()) {
@@ -151,7 +152,7 @@ protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOu
             bsonOutput.writeByte(0);    // payload type
             commandStartPosition = bsonOutput.getPosition();
 
-            addDocument(command, bsonOutput, commandFieldNameValidator, getExtraElements(sessionContext));
+            addDocument(command, bsonOutput, commandFieldNameValidator, getExtraElements(operationContext));
 
             if (payload != null) {
                 bsonOutput.writeByte(1);          // payload type
@@ -214,8 +215,16 @@ private boolean useOpMsg() {
         return getOpCode().equals(OpCode.OP_MSG);
     }
 
-    private List<BsonElement> getExtraElements(final SessionContext sessionContext) {
+    private List<BsonElement> getExtraElements(final OperationContext operationContext) {
+        SessionContext sessionContext = operationContext.getSessionContext();
+        TimeoutContext timeoutContext = operationContext.getTimeoutContext();
+
         List<BsonElement> extraElements = new ArrayList<>();
+        if (!getSettings().isCryptd()) {
+           timeoutContext.runMaxTimeMS(maxTimeMS ->
+                   extraElements.add(new BsonElement("maxTimeMS", new BsonInt64(maxTimeMS)))
+           );
+        }
         extraElements.add(new BsonElement("$db", new BsonString(new MongoNamespace(getCollectionName()).getDatabaseName())));
         if (sessionContext.getClusterTime() != null) {
             extraElements.add(new BsonElement("$clusterTime", sessionContext.getClusterTime()));
diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java
index 7fab16b30a3..2cc78497980 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java
@@ -30,5 +30,5 @@ public interface CommandProtocol<T> {
 
     void executeAsync(InternalConnection connection, SingleResultCallback<T> callback);
 
-    CommandProtocol<T> sessionContext(SessionContext sessionContext);
+    CommandProtocol<T> withSessionContext(SessionContext sessionContext);
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java
index 251b4f21d2d..de9e0666d40 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java
@@ -18,8 +18,6 @@
 
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ClusterConnectionMode;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.session.SessionContext;
@@ -42,16 +40,12 @@ class CommandProtocolImpl<T> implements CommandProtocol<T> {
     private final Decoder<T> commandResultDecoder;
     private final boolean responseExpected;
     private final ClusterConnectionMode clusterConnectionMode;
-    private final RequestContext requestContext;
-    private SessionContext sessionContext;
-    private final ServerApi serverApi;
     private final OperationContext operationContext;
 
     CommandProtocolImpl(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
             @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final boolean responseExpected,
             @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator,
-            final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi, final RequestContext requestContext,
-            final OperationContext operationContext) {
+            final ClusterConnectionMode clusterConnectionMode, final OperationContext operationContext) {
         notNull("database", database);
         this.namespace = new MongoNamespace(notNull("database", database), MongoNamespace.COMMAND_COLLECTION_NAME);
         this.command = notNull("command", command);
@@ -62,8 +56,6 @@ class CommandProtocolImpl<T> implements CommandProtocol<T> {
         this.payload = payload;
         this.payloadFieldNameValidator = payloadFieldNameValidator;
         this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode);
-        this.serverApi = serverApi;
-        this.requestContext = notNull("requestContext", requestContext);
         this.operationContext = operationContext;
 
         isTrueArgument("payloadFieldNameValidator cannot be null if there is a payload.",
@@ -73,15 +65,14 @@ class CommandProtocolImpl<T> implements CommandProtocol<T> {
     @Nullable
     @Override
     public T execute(final InternalConnection connection) {
-        return connection.sendAndReceive(getCommandMessage(connection), commandResultDecoder, sessionContext, requestContext,
-                operationContext);
+        return connection.sendAndReceive(getCommandMessage(connection), commandResultDecoder, operationContext);
     }
 
     @Override
     public void executeAsync(final InternalConnection connection, final SingleResultCallback<T> callback) {
         try {
-            connection.sendAndReceiveAsync(getCommandMessage(connection), commandResultDecoder, sessionContext, requestContext,
-                    operationContext, (result, t) -> {
+            connection.sendAndReceiveAsync(getCommandMessage(connection), commandResultDecoder, operationContext,
+                    (result, t) -> {
                         if (t != null) {
                             callback.onResult(null, t);
                         } else {
@@ -94,14 +85,15 @@ public void executeAsync(final InternalConnection connection, final SingleResult
     }
 
     @Override
-    public CommandProtocolImpl<T> sessionContext(final SessionContext sessionContext) {
-        this.sessionContext = sessionContext;
-        return this;
+    public CommandProtocolImpl<T> withSessionContext(final SessionContext sessionContext) {
+        return new CommandProtocolImpl<>(namespace.getDatabaseName(), command, commandFieldNameValidator, readPreference,
+                commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode,
+                operationContext.withSessionContext(sessionContext));
     }
 
     private CommandMessage getCommandMessage(final InternalConnection connection) {
         return new CommandMessage(namespace, command, commandFieldNameValidator, readPreference,
-                    getMessageSettings(connection.getDescription()), responseExpected, payload,
-                payloadFieldNameValidator, clusterConnectionMode, serverApi);
+                    getMessageSettings(connection.getDescription(), connection.getInitialServerDescription()), responseExpected, payload,
+                payloadFieldNameValidator, clusterConnectionMode, operationContext.getServerApi());
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java
index 698fe2ece9f..9880ef3fb0b 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java
@@ -16,7 +16,6 @@
 
 package com.mongodb.internal.connection;
 
-import com.mongodb.internal.session.SessionContext;
 import org.bson.ByteBuf;
 import org.bson.io.BsonOutput;
 
@@ -38,7 +37,7 @@ class CompressedMessage extends RequestMessage {
     }
 
     @Override
-    protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final SessionContext sessionContext) {
+    protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final OperationContext operationContext) {
         bsonOutput.writeInt32(wrappedOpcode.getValue());
         bsonOutput.writeInt32(getWrappedMessageSize(wrappedMessageBuffers) - MESSAGE_HEADER_LENGTH);
         bsonOutput.writeByte(compressor.getId());
diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java b/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java
index c174e828bde..fe3ac129631 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java
@@ -23,8 +23,7 @@
 import com.mongodb.MongoTimeoutException;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.internal.VisibleForTesting;
-import com.mongodb.internal.time.TimePoint;
-import com.mongodb.internal.time.Timeout;
+import com.mongodb.internal.time.StartTime;
 import com.mongodb.lang.Nullable;
 
 import java.util.Deque;
@@ -147,7 +146,7 @@ public T get() {
      * Gets an object from the pool. Blocks until an object is available, or the specified {@code timeout} expires,
      * or the pool is {@linkplain #close() closed}/{@linkplain #pause(Supplier) paused}.
      *
-     * @param timeout See {@link Timeout#started(long, TimeUnit, TimePoint)}.
+     * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}.
      * @param timeUnit the time unit of the timeout
      * @return An object from the pool, or null if can't get one in the given waitTime
      * @throws MongoTimeoutException if the timeout has been exceeded
@@ -231,7 +230,7 @@ private T createNewAndReleasePermitIfFailure() {
     }
 
     /**
-     * @param timeout See {@link Timeout#started(long, TimeUnit, TimePoint)}.
+     * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}.
      */
     @VisibleForTesting(otherwise = PRIVATE)
     boolean acquirePermit(final long timeout, final TimeUnit timeUnit) {
@@ -388,7 +387,7 @@ boolean acquirePermitImmediateUnfair() {
          * This method also emulates the eager {@link InterruptedException} behavior of
          * {@link java.util.concurrent.Semaphore#tryAcquire(long, TimeUnit)}.
          *
-         * @param timeout See {@link Timeout#started(long, TimeUnit, TimePoint)}.
+         * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}.
          */
         boolean acquirePermit(final long timeout, final TimeUnit unit) throws MongoInterruptedException {
             long remainingNanos = unit.toNanos(timeout);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/Connection.java b/driver-core/src/main/com/mongodb/internal/connection/Connection.java
index 6200a626897..95094b240c1 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/Connection.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/Connection.java
@@ -19,7 +19,6 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.connection.ConnectionDescription;
-import com.mongodb.internal.binding.BindingContext;
 import com.mongodb.internal.binding.ReferenceCounted;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
@@ -47,11 +46,11 @@ public interface Connection extends ReferenceCounted {
 
     @Nullable
     <T> T command(String database, BsonDocument command, FieldNameValidator fieldNameValidator, @Nullable ReadPreference readPreference,
-            Decoder<T> commandResultDecoder, BindingContext context);
+            Decoder<T> commandResultDecoder, OperationContext operationContext);
 
     @Nullable
     <T> T command(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator,
-            @Nullable ReadPreference readPreference, Decoder<T> commandResultDecoder, BindingContext context,
+            @Nullable ReadPreference readPreference, Decoder<T> commandResultDecoder, OperationContext operationContext,
             boolean responseExpected, @Nullable SplittablePayload payload, @Nullable FieldNameValidator payloadFieldNameValidator);
 
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java b/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java
index 39a50063163..2129d42b941 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java
@@ -18,15 +18,11 @@
 
 import com.mongodb.MongoConnectionPoolClearedException;
 import com.mongodb.annotations.ThreadSafe;
-import com.mongodb.connection.ConnectionPoolSettings;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.time.Timeout;
-import com.mongodb.internal.time.TimePoint;
-import org.bson.types.ObjectId;
 import com.mongodb.lang.Nullable;
+import org.bson.types.ObjectId;
 
 import java.io.Closeable;
-import java.util.concurrent.TimeUnit;
 
 /**
  * An instance of an implementation must be created in the {@linkplain #invalidate(Throwable) paused} state.
@@ -34,19 +30,10 @@
 @ThreadSafe
 interface ConnectionPool extends Closeable {
     /**
-     * Is equivalent to {@link #get(OperationContext, long, TimeUnit)} called with {@link ConnectionPoolSettings#getMaxWaitTime(TimeUnit)}.
-     */
-    InternalConnection get(OperationContext operationContext) throws MongoConnectionPoolClearedException;
-
-    /**
-     * @param operationContext operation context
-     * @param timeout This is not a timeout for the whole {@link #get(OperationContext, long, TimeUnit)},
-     * see {@link ConnectionPoolSettings#getMaxWaitTime(TimeUnit)}.
-     * <p>
-     * See {@link Timeout#started(long, TimeUnit, TimePoint)}.</p>
+     * @param operationContext the operation context
      * @throws MongoConnectionPoolClearedException If detects that the pool is {@linkplain #invalidate(Throwable) paused}.
      */
-    InternalConnection get(OperationContext operationContext, long timeout, TimeUnit timeUnit) throws MongoConnectionPoolClearedException;
+    InternalConnection get(OperationContext operationContext) throws MongoConnectionPoolClearedException;
 
     /**
      * Completes the {@code callback} with a {@link MongoConnectionPoolClearedException}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java
index 13e7ec09a16..a9a3525a90a 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java
@@ -46,10 +46,11 @@ class DefaultAuthenticator extends Authenticator implements SpeculativeAuthentic
     }
 
     @Override
-    void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) {
+    void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription,
+                      final OperationContext operationContext) {
         try {
             setDelegate(connectionDescription);
-            delegate.authenticate(connection, connectionDescription);
+            delegate.authenticate(connection, connectionDescription, operationContext);
         } catch (Exception e) {
             throw wrapException(e);
         }
@@ -57,9 +58,9 @@ void authenticate(final InternalConnection connection, final ConnectionDescripti
 
     @Override
     void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription,
-                           final SingleResultCallback<Void> callback) {
+                           final OperationContext operationContext, final SingleResultCallback<Void> callback) {
         setDelegate(connectionDescription);
-        delegate.authenticateAsync(connection, connectionDescription, callback);
+        delegate.authenticateAsync(connection, connectionDescription, operationContext, callback);
     }
 
     @Override
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java
index 0375373c23b..5fb6de6f69a 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java
@@ -31,6 +31,7 @@
 import com.mongodb.event.CommandListener;
 import com.mongodb.event.ServerListener;
 import com.mongodb.event.ServerMonitorListener;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
@@ -60,7 +61,10 @@ public final class DefaultClusterFactory {
     public Cluster createCluster(final ClusterSettings originalClusterSettings, final ServerSettings originalServerSettings,
                                  final ConnectionPoolSettings connectionPoolSettings,
                                  final InternalConnectionPoolSettings internalConnectionPoolSettings,
-                                 final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory,
+                                 final TimeoutSettings clusterTimeoutSettings,
+                                 final StreamFactory streamFactory,
+                                 final TimeoutSettings heartbeatTimeoutSettings,
+                                 final StreamFactory heartbeatStreamFactory,
                                  @Nullable final MongoCredential credential,
                                  final LoggerSettings loggerSettings,
                                  @Nullable final CommandListener commandListener,
@@ -98,17 +102,22 @@ public Cluster createCluster(final ClusterSettings originalClusterSettings, fina
         }
 
         DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = new DefaultDnsSrvRecordMonitorFactory(clusterId, serverSettings, dnsClient);
+        InternalOperationContextFactory clusterOperationContextFactory =
+                new InternalOperationContextFactory(clusterTimeoutSettings, serverApi);
+        InternalOperationContextFactory heartBeatOperationContextFactory =
+                new InternalOperationContextFactory(heartbeatTimeoutSettings, serverApi);
 
         if (clusterSettings.getMode() == ClusterConnectionMode.LOAD_BALANCED) {
             ClusterableServerFactory serverFactory = new LoadBalancedClusterableServerFactory(serverSettings,
                     connectionPoolSettings, internalConnectionPoolSettings, streamFactory, credential, loggerSettings, commandListener,
                     applicationName, mongoDriverInformation != null ? mongoDriverInformation : MongoDriverInformation.builder().build(),
-                    compressorList, serverApi);
+                    compressorList, serverApi, clusterOperationContextFactory);
             return new LoadBalancedCluster(clusterId, clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
         } else {
             ClusterableServerFactory serverFactory = new DefaultClusterableServerFactory(serverSettings,
                     connectionPoolSettings, internalConnectionPoolSettings,
-                    streamFactory, heartbeatStreamFactory, credential, loggerSettings, commandListener, applicationName,
+                    clusterOperationContextFactory, streamFactory, heartBeatOperationContextFactory, heartbeatStreamFactory, credential,
+                    loggerSettings, commandListener, applicationName,
                     mongoDriverInformation != null ? mongoDriverInformation : MongoDriverInformation.builder().build(), compressorList,
                     serverApi, FaasEnvironment.getFaasEnvironment() != FaasEnvironment.UNKNOWN);
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java
index 7d0f5b62e51..880e1db8521 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java
@@ -43,9 +43,11 @@ public class DefaultClusterableServerFactory implements ClusterableServerFactory
     private final ServerSettings serverSettings;
     private final ConnectionPoolSettings connectionPoolSettings;
     private final InternalConnectionPoolSettings internalConnectionPoolSettings;
+    private final InternalOperationContextFactory clusterOperationContextFactory;
     private final StreamFactory streamFactory;
-    private final MongoCredentialWithCache credential;
+    private final InternalOperationContextFactory heartbeatOperationContextFactory;
     private final StreamFactory heartbeatStreamFactory;
+    private final MongoCredentialWithCache credential;
     private final LoggerSettings loggerSettings;
     private final CommandListener commandListener;
     private final String applicationName;
@@ -58,18 +60,20 @@ public class DefaultClusterableServerFactory implements ClusterableServerFactory
     public DefaultClusterableServerFactory(
             final ServerSettings serverSettings, final ConnectionPoolSettings connectionPoolSettings,
             final InternalConnectionPoolSettings internalConnectionPoolSettings,
-            final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory,
-            @Nullable final MongoCredential credential,
-            final LoggerSettings loggerSettings,
-            @Nullable final CommandListener commandListener,
-            @Nullable final String applicationName, @Nullable final MongoDriverInformation mongoDriverInformation,
+            final InternalOperationContextFactory clusterOperationContextFactory, final StreamFactory streamFactory,
+            final InternalOperationContextFactory heartbeatOperationContextFactory, final StreamFactory heartbeatStreamFactory,
+            @Nullable final MongoCredential credential, final LoggerSettings loggerSettings,
+            @Nullable final CommandListener commandListener, @Nullable final String applicationName,
+            @Nullable final MongoDriverInformation mongoDriverInformation,
             final List<MongoCompressor> compressorList, @Nullable final ServerApi serverApi, final boolean isFunctionAsAServiceEnvironment) {
         this.serverSettings = serverSettings;
         this.connectionPoolSettings = connectionPoolSettings;
         this.internalConnectionPoolSettings = internalConnectionPoolSettings;
+        this.clusterOperationContextFactory = clusterOperationContextFactory;
         this.streamFactory = streamFactory;
-        this.credential = credential == null ? null : new MongoCredentialWithCache(credential);
+        this.heartbeatOperationContextFactory = heartbeatOperationContextFactory;
         this.heartbeatStreamFactory = heartbeatStreamFactory;
+        this.credential = credential == null ? null : new MongoCredentialWithCache(credential);
         this.loggerSettings = loggerSettings;
         this.commandListener = commandListener;
         this.applicationName = applicationName;
@@ -88,11 +92,11 @@ public ClusterableServer create(final Cluster cluster, final ServerAddress serve
                 // no credentials, compressor list, or command listener for the server monitor factory
                 new InternalStreamConnectionFactory(clusterMode, true, heartbeatStreamFactory, null, applicationName,
                         mongoDriverInformation, emptyList(), loggerSettings, null, serverApi),
-                clusterMode, serverApi, isFunctionAsAServiceEnvironment, sdamProvider);
+                clusterMode, serverApi, isFunctionAsAServiceEnvironment, sdamProvider, heartbeatOperationContextFactory);
         ConnectionPool connectionPool = new DefaultConnectionPool(serverId,
                 new InternalStreamConnectionFactory(clusterMode, streamFactory, credential, applicationName,
                         mongoDriverInformation, compressorList, loggerSettings, commandListener, serverApi),
-                connectionPoolSettings, internalConnectionPoolSettings, sdamProvider);
+                connectionPoolSettings, internalConnectionPoolSettings, sdamProvider, clusterOperationContextFactory);
         ServerListener serverListener = singleServerListener(serverSettings);
         SdamServerDescriptionManager sdam = new DefaultSdamServerDescriptionManager(cluster, serverId, serverListener, serverMonitor,
                 connectionPool, clusterMode);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java
index 26676718d41..78db18db2dc 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java
@@ -21,7 +21,6 @@
 import com.mongodb.MongoInterruptedException;
 import com.mongodb.MongoServerUnavailableException;
 import com.mongodb.MongoTimeoutException;
-import com.mongodb.RequestContext;
 import com.mongodb.annotations.NotThreadSafe;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.connection.ClusterId;
@@ -52,9 +51,8 @@
 import com.mongodb.internal.inject.OptionalProvider;
 import com.mongodb.internal.logging.LogMessage;
 import com.mongodb.internal.logging.StructuredLogger;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.internal.thread.DaemonThreadFactory;
-import com.mongodb.internal.time.TimePoint;
+import com.mongodb.internal.time.StartTime;
 import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.NonNull;
 import com.mongodb.lang.Nullable;
@@ -120,18 +118,17 @@
 import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVICE_ID;
 import static com.mongodb.internal.logging.LogMessage.Entry.Name.WAIT_QUEUE_TIMEOUT_MS;
 import static com.mongodb.internal.logging.LogMessage.Level.DEBUG;
-import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
 import static java.lang.String.format;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
-@SuppressWarnings("deprecation")
 @ThreadSafe
 final class DefaultConnectionPool implements ConnectionPool {
     private static final Logger LOGGER = Loggers.getLogger("connection");
     private static final StructuredLogger STRUCTURED_LOGGER = new StructuredLogger("connection");
     private final ConcurrentPool<UsageTrackingInternalConnection> pool;
     private final ConnectionPoolSettings settings;
+    private final InternalOperationContextFactory operationContextFactory;
     private final BackgroundMaintenanceManager backgroundMaintenance;
     private final AsyncWorkManager asyncWorkManager;
     private final ConnectionPoolListener connectionPoolListener;
@@ -145,8 +142,10 @@ final class DefaultConnectionPool implements ConnectionPool {
 
     @VisibleForTesting(otherwise = PRIVATE)
     DefaultConnectionPool(final ServerId serverId, final InternalConnectionFactory internalConnectionFactory,
-            final ConnectionPoolSettings settings, final OptionalProvider<SdamServerDescriptionManager> sdamProvider) {
-        this(serverId, internalConnectionFactory, settings, InternalConnectionPoolSettings.builder().build(), sdamProvider);
+            final ConnectionPoolSettings settings, final OptionalProvider<SdamServerDescriptionManager> sdamProvider,
+            final InternalOperationContextFactory operationContextFactory) {
+        this(serverId, internalConnectionFactory, settings, InternalConnectionPoolSettings.builder().build(), sdamProvider,
+                operationContextFactory);
     }
 
     /**
@@ -160,13 +159,15 @@ final class DefaultConnectionPool implements ConnectionPool {
      */
     DefaultConnectionPool(final ServerId serverId, final InternalConnectionFactory internalConnectionFactory,
             final ConnectionPoolSettings settings, final InternalConnectionPoolSettings internalSettings,
-            final OptionalProvider<SdamServerDescriptionManager> sdamProvider) {
+            final OptionalProvider<SdamServerDescriptionManager> sdamProvider,
+            final InternalOperationContextFactory operationContextFactory) {
         this.serverId = notNull("serverId", serverId);
         this.settings = notNull("settings", settings);
         UsageTrackingInternalConnectionItemFactory connectionItemFactory =
                 new UsageTrackingInternalConnectionItemFactory(internalConnectionFactory);
         pool = new ConcurrentPool<>(maxSize(settings), connectionItemFactory, format("The server at %s is no longer available",
                 serverId.getAddress()));
+        this.operationContextFactory = assertNotNull(operationContextFactory);
         this.sdamProvider = assertNotNull(sdamProvider);
         this.connectionPoolListener = getConnectionPoolListener(settings);
         backgroundMaintenance = new BackgroundMaintenanceManager();
@@ -189,18 +190,13 @@ public int getGeneration(@NonNull final ObjectId serviceId) {
 
     @Override
     public InternalConnection get(final OperationContext operationContext) {
-        return get(operationContext, settings.getMaxWaitTime(MILLISECONDS), MILLISECONDS);
-    }
-
-    @Override
-    public InternalConnection get(final OperationContext operationContext, final long timeoutValue, final TimeUnit timeUnit) {
-        TimePoint checkoutStart = connectionCheckoutStarted(operationContext);
-        Timeout timeout = Timeout.started(timeoutValue, timeUnit, checkoutStart);
+        StartTime checkoutStart = connectionCheckoutStarted(operationContext);
+        Timeout waitQueueTimeout =  operationContext.getTimeoutContext().startWaitQueueTimeout(checkoutStart);
         try {
             stateAndGeneration.throwIfClosedOrPaused();
-            PooledConnection connection = getPooledConnection(timeout);
+            PooledConnection connection = getPooledConnection(waitQueueTimeout, checkoutStart);
             if (!connection.opened()) {
-                connection = openConcurrencyLimiter.openOrGetAvailable(connection, timeout);
+                connection = openConcurrencyLimiter.openOrGetAvailable(operationContext, connection, waitQueueTimeout, checkoutStart);
             }
             connection.checkedOutForOperation(operationContext);
             connectionCheckedOut(operationContext, connection, checkoutStart);
@@ -212,12 +208,12 @@ public InternalConnection get(final OperationContext operationContext, final lon
 
     @Override
     public void getAsync(final OperationContext operationContext, final SingleResultCallback<InternalConnection> callback) {
-        TimePoint checkoutStart = connectionCheckoutStarted(operationContext);
-        Timeout timeout = Timeout.started(settings.getMaxWaitTime(NANOSECONDS), checkoutStart);
+        StartTime checkoutStart = connectionCheckoutStarted(operationContext);
+        Timeout maxWaitTimeout = checkoutStart.timeoutAfterOrInfiniteIfNegative(settings.getMaxWaitTime(NANOSECONDS), NANOSECONDS);
         SingleResultCallback<PooledConnection> eventSendingCallback = (connection, failure) -> {
             SingleResultCallback<InternalConnection> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
             if (failure == null) {
-                connection.checkedOutForOperation(operationContext);
+                assertNotNull(connection).checkedOutForOperation(operationContext);
                 connectionCheckedOut(operationContext, connection, checkoutStart);
                 errHandlingCallback.onResult(connection, null);
             } else {
@@ -230,13 +226,13 @@ public void getAsync(final OperationContext operationContext, final SingleResult
             eventSendingCallback.onResult(null, e);
             return;
         }
-        asyncWorkManager.enqueue(new Task(timeout, t -> {
+        asyncWorkManager.enqueue(new Task(maxWaitTimeout, checkoutStart, t -> {
             if (t != null) {
                 eventSendingCallback.onResult(null, t);
             } else {
                 PooledConnection connection;
                 try {
-                    connection = getPooledConnection(timeout);
+                    connection = getPooledConnection(maxWaitTimeout, checkoutStart);
                 } catch (Exception e) {
                     eventSendingCallback.onResult(null, e);
                     return;
@@ -244,7 +240,8 @@ public void getAsync(final OperationContext operationContext, final SingleResult
                 if (connection.opened()) {
                     eventSendingCallback.onResult(connection, null);
                 } else {
-                    openConcurrencyLimiter.openAsyncWithConcurrencyLimit(connection, timeout, eventSendingCallback);
+                    openConcurrencyLimiter.openWithConcurrencyLimitAsync(
+                            operationContext, connection, maxWaitTimeout, checkoutStart, eventSendingCallback);
                 }
             }
         }));
@@ -255,7 +252,7 @@ public void getAsync(final OperationContext operationContext, final SingleResult
      * and returns {@code t} if it is not {@link MongoOpenConnectionInternalException},
      * or returns {@code t.}{@linkplain MongoOpenConnectionInternalException#getCause() getCause()} otherwise.
      */
-    private Throwable checkOutFailed(final Throwable t, final OperationContext operationContext, final TimePoint checkoutStart) {
+    private Throwable checkOutFailed(final Throwable t, final OperationContext operationContext, final StartTime checkoutStart) {
         Throwable result = t;
         Reason reason;
         if (t instanceof MongoTimeoutException) {
@@ -334,16 +331,22 @@ public int getGeneration() {
         return stateAndGeneration.generation();
     }
 
-    private PooledConnection getPooledConnection(final Timeout timeout) throws MongoTimeoutException {
+    private PooledConnection getPooledConnection(final Timeout waitQueueTimeout, final StartTime startTime) throws MongoTimeoutException {
         try {
-            UsageTrackingInternalConnection internalConnection = pool.get(timeout.remainingOrInfinite(NANOSECONDS), NANOSECONDS);
+            UsageTrackingInternalConnection internalConnection = waitQueueTimeout.call(NANOSECONDS,
+                    () -> pool.get(-1L, NANOSECONDS),
+                    (ns) -> pool.get(ns, NANOSECONDS),
+                    () -> pool.get(0L, NANOSECONDS));
             while (shouldPrune(internalConnection)) {
                 pool.release(internalConnection, true);
-                internalConnection = pool.get(timeout.remainingOrInfinite(NANOSECONDS), NANOSECONDS);
+                internalConnection = waitQueueTimeout.call(NANOSECONDS,
+                        () -> pool.get(-1L, NANOSECONDS),
+                        (ns) -> pool.get(ns, NANOSECONDS),
+                        () -> pool.get(0L, NANOSECONDS));
             }
             return new PooledConnection(internalConnection);
         } catch (MongoTimeoutException e) {
-            throw createTimeoutException(timeout);
+            throw createTimeoutException(startTime);
         }
     }
 
@@ -357,12 +360,13 @@ private PooledConnection getPooledConnectionImmediateUnfair() {
         return internalConnection == null ? null : new PooledConnection(internalConnection);
     }
 
-    private MongoTimeoutException createTimeoutException(final Timeout timeout) {
+    private MongoTimeoutException createTimeoutException(final StartTime startTime) {
+        long elapsedMs = startTime.elapsed().toMillis();
         int numPinnedToCursor = pinnedStatsManager.getNumPinnedToCursor();
         int numPinnedToTransaction = pinnedStatsManager.getNumPinnedToTransaction();
         if (numPinnedToCursor == 0 && numPinnedToTransaction == 0) {
-            return new MongoTimeoutException(format("Timed out after %s while waiting for a connection to server %s.",
-                    timeout.toUserString(), serverId.getAddress()));
+            return new MongoTimeoutException(format("Timed out after %d ms while waiting for a connection to server %s.",
+                    elapsedMs, serverId.getAddress()));
         } else {
             int maxSize = pool.getMaxSize();
             int numInUse = pool.getInUseCount();
@@ -391,10 +395,10 @@ private MongoTimeoutException createTimeoutException(final Timeout timeout) {
             int numOtherInUse = numInUse - numPinnedToCursor - numPinnedToTransaction;
             assertTrue(numOtherInUse >= 0);
             assertTrue(numPinnedToCursor + numPinnedToTransaction + numOtherInUse <= maxSize);
-            return new MongoTimeoutException(format("Timed out after %s while waiting for a connection to server %s. Details: "
+            return new MongoTimeoutException(format("Timed out after %d ms while waiting for a connection to server %s. Details: "
                             + "maxPoolSize: %s, connections in use by cursors: %d, connections in use by transactions: %d, "
                             + "connections in use by other operations: %d",
-                    timeout.toUserString(), serverId.getAddress(),
+                    elapsedMs, serverId.getAddress(),
                     sizeToString(maxSize), numPinnedToCursor, numPinnedToTransaction,
                     numOtherInUse));
         }
@@ -418,7 +422,8 @@ void doMaintenance() {
             if (shouldEnsureMinSize()) {
                 pool.ensureMinSize(settings.getMinSize(), newConnection -> {
                     try {
-                        openConcurrencyLimiter.openImmediatelyAndTryHandOverOrRelease(new PooledConnection(newConnection));
+                        OperationContext operationContext = operationContextFactory.createMaintenanceContext();
+                        openConcurrencyLimiter.openImmediatelyAndTryHandOverOrRelease(operationContext, new PooledConnection(newConnection));
                     } catch (MongoException | MongoOpenConnectionInternalException e) {
                         RuntimeException actualException = e instanceof MongoOpenConnectionInternalException
                                 ? (RuntimeException) e.getCause()
@@ -504,13 +509,14 @@ private void connectionPoolCreated(final ConnectionPoolListener connectionPoolLi
      * Send both current and deprecated events in order to preserve backwards compatibility.
      * Must not throw {@link Exception}s.
      *
-     * @return A {@link TimePoint} before executing {@link ConnectionPoolListener#connectionCreated(ConnectionCreatedEvent)}
+     * @return A {@link StartTime} before executing {@link ConnectionPoolListener#connectionCreated(ConnectionCreatedEvent)}
      * and logging the event. This order is required by
+     *
      * <a href="https://github.com/mongodb/specifications/blob/master/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst#events">CMAP</a>
      * and {@link ConnectionReadyEvent#getElapsedTime(TimeUnit)}.
      */
-    private TimePoint connectionCreated(final ConnectionPoolListener connectionPoolListener, final ConnectionId connectionId) {
-        TimePoint openStart = TimePoint.now();
+    private StartTime connectionCreated(final ConnectionPoolListener connectionPoolListener, final ConnectionId connectionId) {
+        StartTime openStart = StartTime.now();
         logEventMessage("Connection created",
                 "Connection created: address={}:{}, driver-generated ID={}",
                 connectionId.getLocalValue());
@@ -545,7 +551,7 @@ private void connectionClosed(final ConnectionPoolListener connectionPoolListene
     private void connectionCheckedOut(
             final OperationContext operationContext,
             final PooledConnection connection,
-            final TimePoint checkoutStart) {
+            final StartTime checkoutStart) {
         Duration checkoutDuration = checkoutStart.elapsed();
         ConnectionId connectionId = getId(connection);
         ClusterId clusterId = serverId.getClusterId();
@@ -562,18 +568,19 @@ private void connectionCheckedOut(
     }
 
     /**
-     * @return A {@link TimePoint} before executing
+     * @return A {@link StartTime} before executing
      * {@link ConnectionPoolListener#connectionCheckOutStarted(ConnectionCheckOutStartedEvent)} and logging the event.
      * This order is required by
      * <a href="https://github.com/mongodb/specifications/blob/master/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst#events">CMAP</a>
      * and {@link ConnectionCheckedOutEvent#getElapsedTime(TimeUnit)}, {@link ConnectionCheckOutFailedEvent#getElapsedTime(TimeUnit)}.
      */
-    private TimePoint connectionCheckoutStarted(final OperationContext operationContext) {
-        TimePoint checkoutStart = TimePoint.now();
+    private StartTime connectionCheckoutStarted(final OperationContext operationContext) {
+        StartTime checkoutStart = StartTime.now();
         logEventMessage("Connection checkout started", "Checkout started for connection to {}:{}");
 
         connectionPoolListener.connectionCheckOutStarted(new ConnectionCheckOutStartedEvent(serverId, operationContext.getId()));
         return checkoutStart;
+
     }
 
     /**
@@ -598,7 +605,7 @@ private class PooledConnection implements InternalConnection {
         private final UsageTrackingInternalConnection wrapped;
         private final AtomicBoolean isClosed = new AtomicBoolean();
         private Connection.PinningMode pinningMode;
-        private OperationContext operationContext;
+        private long operationId;
 
         PooledConnection(final UsageTrackingInternalConnection wrapped) {
             this.wrapped = notNull("wrapped", wrapped);
@@ -610,19 +617,19 @@ public int getGeneration() {
         }
 
         /**
-         * Associates this with the operation context and establishes the checked out start time
+         * Associates this with the operation id and establishes the checked out start time
          */
         public void checkedOutForOperation(final OperationContext operationContext) {
-            this.operationContext = operationContext;
+            this.operationId = operationContext.getId();
         }
 
         @Override
-        public void open() {
+        public void open(final OperationContext operationContext) {
             assertFalse(isClosed.get());
-            TimePoint openStart;
+            StartTime openStart;
             try {
                 openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId());
-                wrapped.open();
+                wrapped.open(operationContext);
             } catch (Exception e) {
                 closeAndHandleOpenFailure();
                 throw new MongoOpenConnectionInternalException(e);
@@ -631,10 +638,10 @@ public void open() {
         }
 
         @Override
-        public void openAsync(final SingleResultCallback<Void> callback) {
+        public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) {
             assertFalse(isClosed.get());
-            TimePoint openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId());
-            wrapped.openAsync((nullResult, failure) -> {
+            StartTime openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId());
+            wrapped.openAsync(operationContext, (nullResult, failure) -> {
                 if (failure != null) {
                     closeAndHandleOpenFailure();
                     callback.onResult(null, new MongoOpenConnectionInternalException(failure));
@@ -664,8 +671,7 @@ private void connectionCheckedIn() {
             logEventMessage("Connection checked in",
                     "Connection checked in: address={}:{}, driver-generated ID={}",
                     connectionId.getLocalValue());
-
-            connectionPoolListener.connectionCheckedIn(new ConnectionCheckedInEvent(connectionId, operationContext.getId()));
+            connectionPoolListener.connectionCheckedIn(new ConnectionCheckedInEvent(connectionId, operationId));
         }
 
         void release() {
@@ -701,7 +707,7 @@ private void closeAndHandleOpenFailure() {
         /**
          * Must not throw {@link Exception}s.
          */
-        private void handleOpenSuccess(final TimePoint openStart) {
+        private void handleOpenSuccess(final StartTime openStart) {
             Duration openDuration = openStart.elapsed();
             ConnectionId connectionId = getId(this);
             ClusterId clusterId = serverId.getClusterId();
@@ -731,34 +737,27 @@ public ByteBuf getBuffer(final int capacity) {
         }
 
         @Override
-        public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId) {
+        public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) {
             isTrue("open", !isClosed.get());
-            wrapped.sendMessage(byteBuffers, lastRequestId);
+            wrapped.sendMessage(byteBuffers, lastRequestId, operationContext);
         }
 
         @Override
-        public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-                final RequestContext requestContext, final OperationContext operationContext) {
+        public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
             isTrue("open", !isClosed.get());
-            return wrapped.sendAndReceive(message, decoder, sessionContext, requestContext, operationContext);
+            return wrapped.sendAndReceive(message, decoder, operationContext);
         }
 
         @Override
-        public <T> void send(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext) {
+        public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
             isTrue("open", !isClosed.get());
-            wrapped.send(message, decoder, sessionContext);
+            wrapped.send(message, decoder, operationContext);
         }
 
         @Override
-        public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext) {
+        public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) {
             isTrue("open", !isClosed.get());
-            return wrapped.receive(decoder, sessionContext);
-        }
-
-        @Override
-        public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext, final int additionalTimeout) {
-            isTrue("open", !isClosed.get());
-            return wrapped.receive(decoder, sessionContext, additionalTimeout);
+            return wrapped.receive(decoder, operationContext);
         }
 
         @Override
@@ -768,28 +767,30 @@ public boolean hasMoreToCome() {
         }
 
         @Override
-        public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-                final RequestContext requestContext, final OperationContext operationContext, final SingleResultCallback<T> callback) {
+        public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder,
+                final OperationContext operationContext, final SingleResultCallback<T> callback) {
             isTrue("open", !isClosed.get());
-            wrapped.sendAndReceiveAsync(message, decoder, sessionContext, requestContext, operationContext, (result, t) -> callback.onResult(result, t));
+            wrapped.sendAndReceiveAsync(message, decoder, operationContext, callback);
         }
 
         @Override
-        public ResponseBuffers receiveMessage(final int responseTo) {
+        public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) {
             isTrue("open", !isClosed.get());
-            return wrapped.receiveMessage(responseTo);
+            return wrapped.receiveMessage(responseTo, operationContext);
         }
 
         @Override
-        public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final SingleResultCallback<Void> callback) {
+        public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext,
+                final SingleResultCallback<Void> callback) {
             isTrue("open", !isClosed.get());
-            wrapped.sendMessageAsync(byteBuffers, lastRequestId, (result, t) -> callback.onResult(null, t));
+            wrapped.sendMessageAsync(byteBuffers, lastRequestId, operationContext, (result, t) -> callback.onResult(null, t));
         }
 
         @Override
-        public void receiveMessageAsync(final int responseTo, final SingleResultCallback<ResponseBuffers> callback) {
+        public void receiveMessageAsync(final int responseTo, final OperationContext operationContext,
+                final SingleResultCallback<ResponseBuffers> callback) {
             isTrue("open", !isClosed.get());
-            wrapped.receiveMessageAsync(responseTo, (result, t) -> callback.onResult(result, t));
+            wrapped.receiveMessageAsync(responseTo, operationContext, callback);
         }
 
         @Override
@@ -825,7 +826,7 @@ public ServerDescription getInitialServerDescription() {
     /**
      * This internal exception is used to express an exceptional situation encountered when opening a connection.
      * It exists because it allows consolidating the code that sends events for exceptional situations in a
-     * {@linkplain #checkOutFailed(Throwable, OperationContext, TimePoint) single place}, it must not be observable by an external code.
+     * {@linkplain #checkOutFailed(Throwable, OperationContext, StartTime) single place}, it must not be observable by external code.
      */
     private static final class MongoOpenConnectionInternalException extends RuntimeException {
         private static final long serialVersionUID = 1;
@@ -902,19 +903,29 @@ private final class OpenConcurrencyLimiter {
             desiredConnectionSlots = new LinkedList<>();
         }
 
-        PooledConnection openOrGetAvailable(final PooledConnection connection, final Timeout timeout) throws MongoTimeoutException {
-            PooledConnection result = openWithConcurrencyLimit(connection, OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, timeout);
+        PooledConnection openOrGetAvailable(final OperationContext operationContext, final PooledConnection connection,
+                final Timeout waitQueueTimeout, final StartTime startTime)
+                throws MongoTimeoutException {
+            PooledConnection result = openWithConcurrencyLimit(
+                    operationContext, connection, OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE,
+                    waitQueueTimeout, startTime);
             return assertNotNull(result);
         }
 
-        void openImmediatelyAndTryHandOverOrRelease(final PooledConnection connection) throws MongoTimeoutException {
-            assertNull(openWithConcurrencyLimit(connection, OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE, Timeout.immediate()));
+        void openImmediatelyAndTryHandOverOrRelease(final OperationContext operationContext,
+                final PooledConnection connection) throws MongoTimeoutException {
+            StartTime startTime = StartTime.now();
+            Timeout timeout = startTime.asTimeout();
+            assertNull(openWithConcurrencyLimit(
+                    operationContext,
+                    connection, OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE,
+                    timeout, startTime));
         }
 
         /**
-         * This method can be thought of as operating in two phases.
-         * In the first phase it tries to synchronously acquire a permit to open the {@code connection}
-         * or get a different {@linkplain PooledConnection#opened() opened} connection if {@code mode} is
+         * This method can be thought of as operating in two phases. In the first phase it tries to synchronously
+         * acquire a permit to open the {@code connection} or get a different
+         * {@linkplain PooledConnection#opened() opened} connection if {@code mode} is
          * {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE} and one becomes available while waiting for a permit.
          * The first phase has one of the following outcomes:
          * <ol>
@@ -925,7 +936,7 @@ void openImmediatelyAndTryHandOverOrRelease(final PooledConnection connection) t
          *     This outcome is possible only if {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}.</li>
          *     <li>A permit is acquired, {@link #connectionCreated(ConnectionPoolListener, ConnectionId)} is reported
          *     and an attempt to open the specified {@code connection} is made. This is the second phase in which
-         *     the {@code connection} is {@linkplain PooledConnection#open() opened synchronously}.
+         *     the {@code connection} is {@linkplain InternalConnection#open(OperationContext) opened synchronously}.
          *     The attempt to open the {@code connection} has one of the following outcomes
          *     combined with releasing the acquired permit:
          *     <ol>
@@ -939,20 +950,23 @@ void openImmediatelyAndTryHandOverOrRelease(final PooledConnection connection) t
          *     </li>
          * </ol>
          *
-         * @param timeout Applies only to the first phase.
-         * @return An {@linkplain PooledConnection#opened() opened} connection which is
-         * either the specified {@code connection},
-         * or potentially a different one if {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE},
-         * or {@code null} if {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_HAND_OVER_OR_RELEASE}.
+         * @param operationContext the operation context
+         * @param waitQueueTimeout Applies only to the first phase.
+         * @return An {@linkplain PooledConnection#opened() opened} connection which is either the specified
+         * {@code connection}, or potentially a different one if {@code mode} is
+         * {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}, or {@code null} if {@code mode} is
+         * {@link OpenWithConcurrencyLimitMode#TRY_HAND_OVER_OR_RELEASE}.
          * @throws MongoTimeoutException If the first phase timed out.
          */
         @Nullable
-        private PooledConnection openWithConcurrencyLimit(final PooledConnection connection, final OpenWithConcurrencyLimitMode mode,
-                final Timeout timeout) throws MongoTimeoutException {
+        private PooledConnection openWithConcurrencyLimit(final OperationContext operationContext,
+                final PooledConnection connection, final OpenWithConcurrencyLimitMode mode,
+                final Timeout waitQueueTimeout, final StartTime startTime)
+                throws MongoTimeoutException {
             PooledConnection availableConnection;
             try {//phase one
                 availableConnection = acquirePermitOrGetAvailableOpenedConnection(
-                        mode == OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, timeout);
+                        mode == OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, waitQueueTimeout, startTime);
             } catch (Exception e) {
                 connection.closeSilently();
                 throw e;
@@ -962,7 +976,7 @@ private PooledConnection openWithConcurrencyLimit(final PooledConnection connect
                 return availableConnection;
             } else {//acquired a permit, phase two
                 try {
-                    connection.open();
+                    connection.open(operationContext);
                     if (mode == OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE) {
                         tryHandOverOrRelease(connection.wrapped);
                         return null;
@@ -976,23 +990,25 @@ private PooledConnection openWithConcurrencyLimit(final PooledConnection connect
         }
 
         /**
-         * This method is similar to {@link #openWithConcurrencyLimit(PooledConnection, OpenWithConcurrencyLimitMode, Timeout)}
+         * This method is similar to {@link #openWithConcurrencyLimit(OperationContext, PooledConnection, OpenWithConcurrencyLimitMode, Timeout, StartTime)}
          * with the following differences:
          * <ul>
          *     <li>It does not have the {@code mode} parameter and acts as if this parameter were
          *     {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}.</li>
          *     <li>While the first phase is still synchronous, the {@code connection} is
-         *     {@linkplain PooledConnection#openAsync(SingleResultCallback) opened asynchronously} in the second phase.</li>
+         *     {@linkplain InternalConnection#openAsync(OperationContext, SingleResultCallback) opened asynchronously} in the second phase.</li>
          *     <li>Instead of returning a result or throwing an exception via Java {@code return}/{@code throw} statements,
          *     it calls {@code callback.}{@link SingleResultCallback#onResult(Object, Throwable) onResult(result, failure)}
          *     and passes either a {@link PooledConnection} or an {@link Exception}.</li>
          * </ul>
          */
-        void openAsyncWithConcurrencyLimit(
-                final PooledConnection connection, final Timeout timeout, final SingleResultCallback<PooledConnection> callback) {
+        void openWithConcurrencyLimitAsync(
+                final OperationContext operationContext, final PooledConnection connection,
+                final Timeout maxWaitTimeout, final StartTime startTime,
+                final SingleResultCallback<PooledConnection> callback) {
             PooledConnection availableConnection;
             try {//phase one
-                availableConnection = acquirePermitOrGetAvailableOpenedConnection(true, timeout);
+                availableConnection = acquirePermitOrGetAvailableOpenedConnection(true, maxWaitTimeout, startTime);
             } catch (Exception e) {
                 connection.closeSilently();
                 callback.onResult(null, e);
@@ -1002,7 +1018,7 @@ void openAsyncWithConcurrencyLimit(
                 connection.closeSilently();
                 callback.onResult(availableConnection, null);
             } else {//acquired a permit, phase two
-                connection.openAsync((nullResult, failure) -> {
+                connection.openAsync(operationContext, (nullResult, failure) -> {
                     releasePermit();
                     if (failure != null) {
                         callback.onResult(null, failure);
@@ -1022,7 +1038,8 @@ void openAsyncWithConcurrencyLimit(
          * set on entry to this method or is interrupted while waiting to get an available opened connection.
          */
         @Nullable
-        private PooledConnection acquirePermitOrGetAvailableOpenedConnection(final boolean tryGetAvailable, final Timeout timeout)
+        private PooledConnection acquirePermitOrGetAvailableOpenedConnection(final boolean tryGetAvailable,
+                final Timeout waitQueueTimeout, final StartTime startTime)
                 throws MongoTimeoutException, MongoInterruptedException {
             PooledConnection availableConnection = null;
             boolean expressedDesireToGetAvailableConnection = false;
@@ -1048,15 +1065,16 @@ private PooledConnection acquirePermitOrGetAvailableOpenedConnection(final boole
                     expressDesireToGetAvailableConnection();
                     expressedDesireToGetAvailableConnection = true;
                 }
-                long remainingNanos = timeout.remainingOrInfinite(NANOSECONDS);
                 while (permits == 0
                         // the absence of short-circuiting is of importance
                         & !stateAndGeneration.throwIfClosedOrPaused()
                         & (availableConnection = tryGetAvailable ? tryGetAvailableConnection() : null) == null) {
-                    if (Timeout.expired(remainingNanos)) {
-                        throw createTimeoutException(timeout);
-                    }
-                    remainingNanos = awaitNanos(permitAvailableOrHandedOverOrClosedOrPausedCondition, remainingNanos);
+
+                    Timeout.onExistsAndExpired(waitQueueTimeout, () -> {
+                        throw createTimeoutException(startTime);
+                    });
+                    waitQueueTimeout.awaitOn(permitAvailableOrHandedOverOrClosedOrPausedCondition,
+                            () -> "acquiring permit or getting available opened connection");
                 }
                 if (availableConnection == null) {
                     assertTrue(permits > 0);
@@ -1129,28 +1147,10 @@ void tryHandOverOrRelease(final UsageTrackingInternalConnection openConnection)
         void signalClosedOrPaused() {
             withUnfairLock(lock, permitAvailableOrHandedOverOrClosedOrPausedCondition::signalAll);
         }
-
-        /**
-         * @param timeoutNanos See {@link Timeout#started(long, TimePoint)}.
-         * @return The remaining duration as per {@link Timeout#remainingOrInfinite(TimeUnit)} if waiting ended early either
-         * spuriously or because of receiving a signal.
-         */
-        private long awaitNanos(final Condition condition, final long timeoutNanos) throws MongoInterruptedException {
-            try {
-                if (timeoutNanos < 0 || timeoutNanos == Long.MAX_VALUE) {
-                    condition.await();
-                    return -1;
-                } else {
-                    return Math.max(0, condition.awaitNanos(timeoutNanos));
-                }
-            } catch (InterruptedException e) {
-                throw interruptAndCreateMongoInterruptedException(null, e);
-            }
-        }
     }
 
     /**
-     * @see OpenConcurrencyLimiter#openWithConcurrencyLimit(PooledConnection, OpenWithConcurrencyLimitMode, Timeout)
+     * @see OpenConcurrencyLimiter#openWithConcurrencyLimit(OperationContext, PooledConnection, OpenWithConcurrencyLimitMode, Timeout, StartTime)
      */
     private enum OpenWithConcurrencyLimitMode {
         TRY_GET_AVAILABLE,
@@ -1341,11 +1341,11 @@ private void workerRun() {
             while (state != State.CLOSED) {
                 try {
                     Task task = tasks.take();
-                    if (task.timeout().expired()) {
-                        task.failAsTimedOut();
-                    } else {
-                        task.execute();
-                    }
+
+                    task.timeout().run(NANOSECONDS,
+                            () -> task.execute(),
+                            (ns) -> task.execute(),
+                            () -> task.failAsTimedOut());
                 } catch (InterruptedException closed) {
                     // fail the rest of the tasks and stop
                 } catch (Exception e) {
@@ -1391,11 +1391,13 @@ private enum State {
     @NotThreadSafe
     final class Task {
         private final Timeout timeout;
+        private final StartTime startTime;
         private final Consumer<RuntimeException> action;
         private boolean completed;
 
-        Task(final Timeout timeout, final Consumer<RuntimeException> action) {
+        Task(final Timeout timeout, final StartTime startTime, final Consumer<RuntimeException> action) {
             this.timeout = timeout;
+            this.startTime = startTime;
             this.action = action;
         }
 
@@ -1408,7 +1410,7 @@ void failAsClosed() {
         }
 
         void failAsTimedOut() {
-            doComplete(() -> createTimeoutException(timeout));
+            doComplete(() -> createTimeoutException(startTime));
         }
 
         private void doComplete(final Supplier<RuntimeException> failureSupplier) {
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java
index 2b300cdfa50..8f3d0f09fd9 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java
@@ -18,7 +18,6 @@
 
 import com.mongodb.MongoException;
 import com.mongodb.MongoServerUnavailableException;
-import com.mongodb.MongoSocketException;
 import com.mongodb.ReadPreference;
 import com.mongodb.connection.ClusterConnectionMode;
 import com.mongodb.connection.ConnectionDescription;
@@ -29,7 +28,6 @@
 import com.mongodb.event.ServerOpeningEvent;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.binding.BindingContext;
 import com.mongodb.internal.connection.SdamServerDescriptionManager.SdamIssue;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
@@ -198,15 +196,16 @@ ServerId serverId() {
         return serverId;
     }
 
-    private class DefaultServerProtocolExecutor implements ProtocolExecutor {
+    private class DefaultServerProtocolExecutor extends AbstractProtocolExecutor {
 
         @SuppressWarnings("unchecked")
         @Override
         public <T> T execute(final CommandProtocol<T> protocol, final InternalConnection connection,
                              final SessionContext sessionContext) {
             try {
-                protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock));
-                return protocol.execute(connection);
+                return protocol
+                        .withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock))
+                        .execute(connection);
             } catch (MongoException e) {
                 try {
                     sdam.handleExceptionAfterHandshake(SdamIssue.specific(e, sdam.context(connection)));
@@ -216,9 +215,9 @@ public <T> T execute(final CommandProtocol<T> protocol, final InternalConnection
                 if (e instanceof MongoWriteConcernWithResponseException) {
                     return (T) ((MongoWriteConcernWithResponseException) e).getResponse();
                 } else {
-                    if (e instanceof MongoSocketException && sessionContext.hasSession()) {
+                    if (shouldMarkSessionDirty(e, sessionContext)) {
                         sessionContext.markSessionDirty();
-                    }
+                    }
                     throw e;
                 }
             }
@@ -228,8 +227,8 @@ public <T> T execute(final CommandProtocol<T> protocol, final InternalConnection
         @Override
         public <T> void executeAsync(final CommandProtocol<T> protocol, final InternalConnection connection,
                                      final SessionContext sessionContext, final SingleResultCallback<T> callback) {
-            protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock));
-            protocol.executeAsync(connection, errorHandlingCallback((result, t) -> {
+            protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock))
+                    .executeAsync(connection, errorHandlingCallback((result, t) -> {
                 if (t != null) {
                     try {
                         sdam.handleExceptionAfterHandshake(SdamIssue.specific(t, sdam.context(connection)));
@@ -239,7 +238,7 @@ public <T> void executeAsync(final CommandProtocol<T> protocol, final InternalCo
                         if (t instanceof MongoWriteConcernWithResponseException) {
                             callback.onResult((T) ((MongoWriteConcernWithResponseException) t).getResponse(), null);
                         } else {
-                            if (t instanceof MongoSocketException && sessionContext.hasSession()) {
+                            if (shouldMarkSessionDirty(t, sessionContext)) {
                                 sessionContext.markSessionDirty();
                             }
                             callback.onResult(null, t);
@@ -295,16 +294,16 @@ public ConnectionDescription getDescription() {
         @Override
         public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
                 @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-                final BindingContext context) {
-            return wrapped.command(database, command, fieldNameValidator, readPreference, commandResultDecoder, context);
+                final OperationContext operationContext) {
+            return wrapped.command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext);
         }
 
         @Override
         public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
                 @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-                final BindingContext context, final boolean responseExpected,
+                final OperationContext operationContext, final boolean responseExpected,
                 @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) {
-            return wrapped.command(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, context,
+            return wrapped.command(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext,
                     responseExpected, payload, payloadFieldNameValidator);
         }
 
@@ -356,19 +355,19 @@ public ConnectionDescription getDescription() {
 
         @Override
         public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
-                @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context,
-                final SingleResultCallback<T> callback) {
+                @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
+                final OperationContext operationContext, final SingleResultCallback<T> callback) {
             wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder,
-                    context, callback);
+                    operationContext, callback);
         }
 
         @Override
         public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
-                @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context,
-                final boolean responseExpected, @Nullable final SplittablePayload payload,
+                @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
+                final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload,
                 @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback<T> callback) {
             wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder,
-                    context, responseExpected, payload, payloadFieldNameValidator, callback);
+                    operationContext, responseExpected, payload, payloadFieldNameValidator, callback);
         }
 
         @Override
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java
index 3b053490464..01d5f587fdc 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java
@@ -20,7 +20,6 @@
 import com.mongodb.connection.ClusterConnectionMode;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.binding.BindingContext;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
 import com.mongodb.internal.session.SessionContext;
@@ -70,39 +69,38 @@ public ConnectionDescription getDescription() {
     @Nullable
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
-            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context) {
-        return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, context, true, null, null);
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext) {
+        return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, null, null);
     }
 
     @Nullable
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
             @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-            final BindingContext context, final boolean responseExpected,
+            final OperationContext operationContext, final boolean responseExpected,
             @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) {
-        return executeProtocol(new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference,
-                        commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode,
-                        context.getServerApi(), context.getRequestContext(), context.getOperationContext()),
-                context.getSessionContext());
+        return executeProtocol(
+                new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, commandResultDecoder,
+                        responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, operationContext),
+                operationContext.getSessionContext());
     }
 
     @Override
     public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
-            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context,
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext,
             final SingleResultCallback<T> callback) {
         commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder,
-                context, true, null, null, callback);
+                operationContext, true, null, null, callback);
     }
 
     @Override
     public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
-            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context,
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext,
             final boolean responseExpected, @Nullable final SplittablePayload payload,
             @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback<T> callback) {
         executeProtocolAsync(new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference,
-                        commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode,
-                        context.getServerApi(), context.getRequestContext(), context.getOperationContext()),
-                context.getSessionContext(), callback);
+                        commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, operationContext),
+                operationContext.getSessionContext(), callback);
     }
 
     @Override
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java
index 55030a6db34..656c9bc7779 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java
@@ -29,10 +29,10 @@
 import com.mongodb.event.ServerHeartbeatStartedEvent;
 import com.mongodb.event.ServerHeartbeatSucceededEvent;
 import com.mongodb.event.ServerMonitorListener;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
 import com.mongodb.internal.inject.Provider;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.internal.validator.NoOpFieldNameValidator;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBoolean;
@@ -73,6 +73,7 @@ class DefaultServerMonitor implements ServerMonitor {
     private final ServerId serverId;
     private final ServerMonitorListener serverMonitorListener;
     private final Provider<SdamServerDescriptionManager> sdamProvider;
+    private final InternalOperationContextFactory operationContextFactory;
     private final InternalConnectionFactory internalConnectionFactory;
     private final ClusterConnectionMode clusterConnectionMode;
     @Nullable
@@ -85,22 +86,24 @@ class DefaultServerMonitor implements ServerMonitor {
      */
     @Nullable
     private RoundTripTimeMonitor roundTripTimeMonitor;
-    private final ExponentiallyWeightedMovingAverage averageRoundTripTime = new ExponentiallyWeightedMovingAverage(0.2);
+    private final RoundTripTimeSampler roundTripTimeSampler = new RoundTripTimeSampler();
     private final Lock lock = new ReentrantLock();
     private final Condition condition = lock.newCondition();
     private volatile boolean isClosed;
 
     DefaultServerMonitor(final ServerId serverId, final ServerSettings serverSettings,
             final InternalConnectionFactory internalConnectionFactory,
-                         final ClusterConnectionMode clusterConnectionMode,
-                         @Nullable final ServerApi serverApi,
-                         final boolean isFunctionAsAServiceEnvironment,
-                         final Provider<SdamServerDescriptionManager> sdamProvider) {
+            final ClusterConnectionMode clusterConnectionMode,
+            @Nullable final ServerApi serverApi,
+            final boolean isFunctionAsAServiceEnvironment,
+            final Provider<SdamServerDescriptionManager> sdamProvider,
+            final InternalOperationContextFactory operationContextFactory) {
         this.serverSettings = notNull("serverSettings", serverSettings);
         this.serverId = notNull("serverId", serverId);
         this.serverMonitorListener = singleServerMonitorListener(serverSettings);
         this.internalConnectionFactory = notNull("internalConnectionFactory", internalConnectionFactory);
         this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode);
+        this.operationContextFactory = assertNotNull(operationContextFactory);
         this.serverApi = serverApi;
         this.isFunctionAsAServiceEnvironment = isFunctionAsAServiceEnvironment;
         this.sdamProvider = sdamProvider;
@@ -135,7 +138,7 @@ public void close() {
             isClosed = true;
             //noinspection EmptyTryBlock
             try (ServerMonitor ignoredAutoClosed = monitor;
-                RoundTripTimeMonitor ignoredAutoClose2 = roundTripTimeMonitor) {
+                 RoundTripTimeMonitor ignoredAutoClose2 = roundTripTimeMonitor) {
                 // we are automatically closing resources here
             }
         });
@@ -213,9 +216,9 @@ private ServerDescription lookupServerDescription(final ServerDescription curren
                 if (connection == null || connection.isClosed()) {
                     currentCheckCancelled = false;
                     InternalConnection newConnection = internalConnectionFactory.create(serverId);
-                    newConnection.open();
+                    newConnection.open(operationContextFactory.create());
                     connection = newConnection;
-                    averageRoundTripTime.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos());
+                    roundTripTimeSampler.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos());
                     return connection.getInitialServerDescription();
                 }
 
@@ -228,7 +231,7 @@ private ServerDescription lookupServerDescription(final ServerDescription curren
 
                 long start = System.nanoTime();
                 try {
-                    SessionContext sessionContext = NoOpSessionContext.INSTANCE;
+                    OperationContext operationContext = operationContextFactory.create();
                     if (!connection.hasMoreToCome()) {
                         BsonDocument helloDocument = new BsonDocument(getHandshakeCommandName(currentServerDescription), new BsonInt32(1))
                                 .append("helloOk", BsonBoolean.TRUE);
@@ -238,26 +241,26 @@ private ServerDescription lookupServerDescription(final ServerDescription curren
                         }
 
                         connection.send(createCommandMessage(helloDocument, connection, currentServerDescription), new BsonDocumentCodec(),
-                                sessionContext);
+                                operationContext);
                     }
 
                     BsonDocument helloResult;
                     if (shouldStreamResponses) {
-                        helloResult = connection.receive(new BsonDocumentCodec(), sessionContext,
-                                Math.toIntExact(serverSettings.getHeartbeatFrequency(MILLISECONDS)));
+                        helloResult = connection.receive(new BsonDocumentCodec(), operationContextWithAdditionalTimeout(operationContext));
                     } else {
-                        helloResult = connection.receive(new BsonDocumentCodec(), sessionContext);
+                        helloResult = connection.receive(new BsonDocumentCodec(), operationContext);
                     }
 
                     long elapsedTimeNanos = System.nanoTime() - start;
                     if (!shouldStreamResponses) {
-                        averageRoundTripTime.addSample(elapsedTimeNanos);
+                        roundTripTimeSampler.addSample(elapsedTimeNanos);
                     }
                     serverMonitorListener.serverHeartbeatSucceeded(
                             new ServerHeartbeatSucceededEvent(connection.getDescription().getConnectionId(), helloResult,
                                     elapsedTimeNanos, shouldStreamResponses));
 
-                    return createServerDescription(serverId.getAddress(), helloResult, averageRoundTripTime.getAverage());
+                    return createServerDescription(serverId.getAddress(), helloResult, roundTripTimeSampler.getAverage(),
+                            roundTripTimeSampler.getMin());
                 } catch (Exception e) {
                     serverMonitorListener.serverHeartbeatFailed(
                             new ServerHeartbeatFailedEvent(connection.getDescription().getConnectionId(), System.nanoTime() - start,
@@ -265,7 +268,7 @@ private ServerDescription lookupServerDescription(final ServerDescription curren
                     throw e;
                 }
             } catch (Throwable t) {
-                averageRoundTripTime.reset();
+                roundTripTimeSampler.reset();
                 InternalConnection localConnection = withLock(lock, () -> {
                     InternalConnection result = connection;
                     connection = null;
@@ -278,6 +281,12 @@ private ServerDescription lookupServerDescription(final ServerDescription curren
             }
         }
 
+        private OperationContext operationContextWithAdditionalTimeout(final OperationContext originalOperationContext) {
+            TimeoutContext newTimeoutContext = originalOperationContext.getTimeoutContext()
+                    .withAdditionalReadTimeout(Math.toIntExact(serverSettings.getHeartbeatFrequency(MILLISECONDS)));
+            return originalOperationContext.withTimeoutContext(newTimeoutContext);
+        }
+
         private boolean shouldStreamResponses(final ServerDescription currentServerDescription) {
             boolean serverSupportsStreaming = currentServerDescription.getTopologyVersion() != null;
             switch (serverSettings.getServerMonitoringMode()) {
@@ -297,7 +306,7 @@ private boolean shouldStreamResponses(final ServerDescription currentServerDescr
         }
 
         private CommandMessage createCommandMessage(final BsonDocument command, final InternalConnection connection,
-                                                    final ServerDescription currentServerDescription) {
+                final ServerDescription currentServerDescription) {
             return new CommandMessage(new MongoNamespace("admin", COMMAND_COLLECTION_NAME), command,
                     new NoOpFieldNameValidator(), primary(),
                     MessageSettings.builder()
@@ -307,7 +316,7 @@ private CommandMessage createCommandMessage(final BsonDocument command, final In
         }
 
         private void logStateChange(final ServerDescription previousServerDescription,
-                                    final ServerDescription currentServerDescription) {
+                final ServerDescription currentServerDescription) {
             if (shouldLogStageChange(previousServerDescription, currentServerDescription)) {
                 if (currentServerDescription.getException() != null) {
                     LOGGER.info(format("Exception in monitor thread while connecting to server %s", serverId.getAddress()),
@@ -395,12 +404,12 @@ static boolean shouldLogStageChange(final ServerDescription previous, final Serv
         }
         ObjectId previousElectionId = previous.getElectionId();
         if (previousElectionId != null
-                    ? !previousElectionId.equals(current.getElectionId()) : current.getElectionId() != null) {
+                ? !previousElectionId.equals(current.getElectionId()) : current.getElectionId() != null) {
             return true;
         }
         Integer setVersion = previous.getSetVersion();
         if (setVersion != null
-                    ? !setVersion.equals(current.getSetVersion()) : current.getSetVersion() != null) {
+                ? !setVersion.equals(current.getSetVersion()) : current.getSetVersion() != null) {
             return true;
         }
 
@@ -470,17 +479,18 @@ public void run() {
         private void initialize() {
             connection = null;
             connection = internalConnectionFactory.create(serverId);
-            connection.open();
-            averageRoundTripTime.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos());
+            connection.open(operationContextFactory.create());
+            roundTripTimeSampler.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos());
         }
 
         private void pingServer(final InternalConnection connection) {
             long start = System.nanoTime();
+            OperationContext operationContext = operationContextFactory.create();
             executeCommand("admin",
                     new BsonDocument(getHandshakeCommandName(connection.getInitialServerDescription()), new BsonInt32(1)),
-                    clusterConnectionMode, serverApi, connection);
+                    clusterConnectionMode, serverApi, connection, operationContext);
             long elapsedTimeNanos = System.nanoTime() - start;
-            averageRoundTripTime.addSample(elapsedTimeNanos);
+            roundTripTimeSampler.addSample(elapsedTimeNanos);
         }
     }
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java b/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java
index e220d88bb31..26f73bcee9c 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java
@@ -87,11 +87,12 @@ static ConnectionDescription createConnectionDescription(final ClusterConnection
     }
 
     public static ServerDescription createServerDescription(final ServerAddress serverAddress, final BsonDocument helloResult,
-                                                            final long roundTripTime) {
+                                                            final long roundTripTime, final long minRoundTripTime) {
         return ServerDescription.builder()
                                 .state(CONNECTED)
                                 .address(serverAddress)
                                 .type(getServerType(helloResult))
+                                .cryptd(helloResult.getBoolean("iscryptd", BsonBoolean.FALSE).getValue())
                                 .canonicalAddress(helloResult.containsKey("me") ? helloResult.getString("me").getValue() : null)
                                 .hosts(listToSet(helloResult.getArray("hosts", new BsonArray())))
                                 .passives(listToSet(helloResult.getArray("passives", new BsonArray())))
@@ -107,6 +108,7 @@ public static ServerDescription createServerDescription(final ServerAddress serv
                                 .topologyVersion(getTopologyVersion(helloResult))
                                 .lastWriteDate(getLastWriteDate(helloResult))
                                 .roundTripTime(roundTripTime, NANOSECONDS)
+                                .minRoundTripTime(minRoundTripTime, NANOSECONDS)
                                 .logicalSessionTimeoutMinutes(getLogicalSessionTimeoutMinutes(helloResult))
                                 .helloOk(helloResult.getBoolean("helloOk", BsonBoolean.FALSE).getValue())
                                 .ok(CommandHelper.isCommandOk(helloResult)).build();
diff --git a/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java b/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java
index 3831d2bfa35..ed5e55b822a 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java
@@ -171,7 +171,7 @@ <A> void read(
     <A> void write(
             ByteBuffer src,
             long timeout, TimeUnit unit,
-            A attach, CompletionHandler<Integer, ? super A> handler);
+            @Nullable A attach, CompletionHandler<Integer, ? super A> handler);
 
     /**
      * Writes a sequence of bytes to this channel from a subsequence of the given
@@ -233,5 +233,5 @@ <A> void write(
     <A> void write(
             ByteBuffer[] srcs, int offset, int length,
             long timeout, TimeUnit unit,
-            A attach, CompletionHandler<Long, ? super A> handler);
+            @Nullable A attach, CompletionHandler<Long, ? super A> handler);
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java
index e2b0188572e..792c33570b7 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java
@@ -16,11 +16,9 @@
 
 package com.mongodb.internal.connection;
 
-import com.mongodb.RequestContext;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.ByteBuf;
 import org.bson.codecs.Decoder;
@@ -50,15 +48,18 @@ public interface InternalConnection extends BufferProvider {
 
     /**
      * Opens the connection so its ready for use. Will perform a handshake.
+     *
+     * @param operationContext the operation context
      */
-    void open();
+    void open(OperationContext operationContext);
 
     /**
      * Opens the connection so its ready for use
      *
-     * @param callback the callback to be called once the connection has been opened
+     * @param operationContext the operation context
+     * @param callback         the callback to be called once the connection has been opened
      */
-    void openAsync(SingleResultCallback<Void> callback);
+    void openAsync(OperationContext operationContext, SingleResultCallback<Void> callback);
 
     /**
      * Closes the connection.
@@ -90,22 +91,14 @@ public interface InternalConnection extends BufferProvider {
      * Send a command message to the server.
      *
      * @param message          the command message to send
-     * @param sessionContext   the session context
-     * @param requestContext   the request context
      * @param operationContext the operation context
      */
     @Nullable
-    <T> T sendAndReceive(CommandMessage message, Decoder<T> decoder, SessionContext sessionContext, RequestContext requestContext,
-            OperationContext operationContext);
+    <T> T sendAndReceive(CommandMessage message, Decoder<T> decoder, OperationContext operationContext);
 
-    <T> void send(CommandMessage message, Decoder<T> decoder, SessionContext sessionContext);
+    <T> void send(CommandMessage message, Decoder<T> decoder, OperationContext operationContext);
 
-    <T> T receive(Decoder<T> decoder, SessionContext sessionContext);
-
-
-    default <T> T receive(Decoder<T> decoder, SessionContext sessionContext, int additionalTimeout) {
-        throw new UnsupportedOperationException();
-    }
+    <T> T receive(Decoder<T> decoder, OperationContext operationContext);
 
     boolean hasMoreToCome();
 
@@ -113,45 +106,49 @@ default <T> T receive(Decoder<T> decoder, SessionContext sessionContext, int add
      * Send a command message to the server.
      *
      * @param message          the command message to send
-     * @param sessionContext   the session context
      * @param operationContext the operation context
      * @param callback         the callback
      */
-    <T> void sendAndReceiveAsync(CommandMessage message, Decoder<T> decoder, SessionContext sessionContext, RequestContext requestContext,
-            OperationContext operationContext, SingleResultCallback<T> callback);
+    <T> void sendAndReceiveAsync(CommandMessage message, Decoder<T> decoder, OperationContext operationContext,
+            SingleResultCallback<T> callback);
 
     /**
      * Send a message to the server. The connection may not make any attempt to validate the integrity of the message.
      *
      * @param byteBuffers   the list of byte buffers to send.
      * @param lastRequestId the request id of the last message in byteBuffers
+     * @param operationContext the operation context
      */
-    void sendMessage(List<ByteBuf> byteBuffers, int lastRequestId);
+    void sendMessage(List<ByteBuf> byteBuffers, int lastRequestId, OperationContext operationContext);
 
     /**
      * Receive a response to a sent message from the server.
      *
      * @param responseTo the request id that this message is a response to
+     * @param operationContext the operation context
      * @return the response
      */
-    ResponseBuffers receiveMessage(int responseTo);
+    ResponseBuffers receiveMessage(int responseTo, OperationContext operationContext);
 
     /**
      * Asynchronously send a message to the server. The connection may not make any attempt to validate the integrity of the message.
      *
      * @param byteBuffers   the list of byte buffers to send
      * @param lastRequestId the request id of the last message in byteBuffers
+     * @param operationContext the operation context
      * @param callback      the callback to invoke on completion
      */
-    void sendMessageAsync(List<ByteBuf> byteBuffers, int lastRequestId, SingleResultCallback<Void> callback);
+    void sendMessageAsync(List<ByteBuf> byteBuffers, int lastRequestId, OperationContext operationContext,
+            SingleResultCallback<Void> callback);
 
     /**
      * Asynchronously receive a response to a sent message from the server.
      *
      * @param responseTo the request id that this message is a response to
+     * @param operationContext the operation context
      * @param callback the callback to invoke on completion
      */
-    void receiveMessageAsync(int responseTo, SingleResultCallback<ResponseBuffers> callback);
+    void receiveMessageAsync(int responseTo, OperationContext operationContext, SingleResultCallback<ResponseBuffers> callback);
 
     default void markAsPinned(Connection.PinningMode pinningMode) {
     }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java
index 9826f20b69b..077e2c68254 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java
@@ -20,14 +20,19 @@
 
 interface InternalConnectionInitializer {
 
-    InternalConnectionInitializationDescription startHandshake(InternalConnection internalConnection);
+    InternalConnectionInitializationDescription startHandshake(InternalConnection internalConnection,
+                                                               OperationContext operationContext);
 
     InternalConnectionInitializationDescription finishHandshake(InternalConnection internalConnection,
-                                                                InternalConnectionInitializationDescription description);
+                                                                InternalConnectionInitializationDescription description,
+                                                                OperationContext operationContext);
 
     void startHandshakeAsync(InternalConnection internalConnection,
+                             OperationContext operationContext,
                              SingleResultCallback<InternalConnectionInitializationDescription> callback);
 
-    void finishHandshakeAsync(InternalConnection internalConnection, InternalConnectionInitializationDescription description,
+    void finishHandshakeAsync(InternalConnection internalConnection,
+                              InternalConnectionInitializationDescription description,
+                              OperationContext operationContext,
                               SingleResultCallback<InternalConnectionInitializationDescription> callback);
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java b/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java
new file mode 100644
index 00000000000..4653c90050b
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.connection;
+
+import com.mongodb.ServerApi;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
+import com.mongodb.lang.Nullable;
+
+import static com.mongodb.internal.connection.OperationContext.simpleOperationContext;
+
+public final class InternalOperationContextFactory {
+
+    private final TimeoutSettings timeoutSettings;
+    @Nullable
+    private final ServerApi serverApi;
+
+    public InternalOperationContextFactory(final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) {
+        this.timeoutSettings = timeoutSettings;
+        this.serverApi = serverApi;
+    }
+
+    /**
+     * @return a simple operation context without timeoutMS
+     */
+    OperationContext create() {
+        return simpleOperationContext(timeoutSettings.connectionOnly(), serverApi);
+    }
+
+    /**
+     * @return a simple operation context with timeoutMS if set at the
+     * MongoClientSettings level
+     */
+    OperationContext createMaintenanceContext() {
+        return create().withTimeoutContext(TimeoutContext.createMaintenanceTimeoutContext(timeoutSettings));
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java
index fc90ce81bef..8c1b273c52b 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java
@@ -23,11 +23,12 @@
 import com.mongodb.MongoException;
 import com.mongodb.MongoInternalException;
 import com.mongodb.MongoInterruptedException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoSocketClosedException;
 import com.mongodb.MongoSocketReadException;
 import com.mongodb.MongoSocketReadTimeoutException;
 import com.mongodb.MongoSocketWriteException;
-import com.mongodb.RequestContext;
+import com.mongodb.MongoSocketWriteTimeoutException;
 import com.mongodb.ServerAddress;
 import com.mongodb.annotations.NotThreadSafe;
 import com.mongodb.connection.AsyncCompletionHandler;
@@ -41,6 +42,7 @@
 import com.mongodb.connection.ServerType;
 import com.mongodb.event.CommandListener;
 import com.mongodb.internal.ResourceUtil;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.async.AsyncSupplier;
 import com.mongodb.internal.async.SingleResultCallback;
@@ -48,6 +50,7 @@
 import com.mongodb.internal.diagnostics.logging.Loggers;
 import com.mongodb.internal.logging.StructuredLogger;
 import com.mongodb.internal.session.SessionContext;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinaryReader;
 import org.bson.BsonDocument;
@@ -73,6 +76,7 @@
 import static com.mongodb.assertions.Assertions.isTrue;
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException;
 import static com.mongodb.internal.async.AsyncRunnable.beginAsync;
 import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.connection.Authenticator.shouldAuthenticate;
 import static com.mongodb.internal.connection.CommandHelper.HELLO;
@@ -219,16 +223,19 @@ public int getGeneration() {
     }
 
     @Override
-    public void open() {
+    public void open(final OperationContext originalOperationContext) {
         isTrue("Open already called", stream == null);
         stream = streamFactory.create(serverId.getAddress());
         try {
-            stream.open();
+            OperationContext operationContext = originalOperationContext
+                    .withTimeoutContext(originalOperationContext.getTimeoutContext().withComputedServerSelectionTimeoutContext());
 
-            InternalConnectionInitializationDescription initializationDescription = connectionInitializer.startHandshake(this);
+            stream.open(operationContext);
+
+            InternalConnectionInitializationDescription initializationDescription = connectionInitializer.startHandshake(this, operationContext);
             initAfterHandshakeStart(initializationDescription);
 
-            initializationDescription = connectionInitializer.finishHandshake(this, initializationDescription);
+            initializationDescription = connectionInitializer.finishHandshake(this, initializationDescription, operationContext);
             initAfterHandshakeFinish(initializationDescription);
         } catch (Throwable t) {
             close();
@@ -241,14 +248,18 @@ public void open() {
     }
 
     @Override
-    public void openAsync(final SingleResultCallback<Void> callback) {
+    public void openAsync(final OperationContext originalOperationContext, final SingleResultCallback<Void> callback) {
         assertNull(stream);
         try {
+            OperationContext operationContext = originalOperationContext
+                    .withTimeoutContext(originalOperationContext.getTimeoutContext().withComputedServerSelectionTimeoutContext());
+
             stream = streamFactory.create(serverId.getAddress());
-            stream.openAsync(new AsyncCompletionHandler<Void>() {
+            stream.openAsync(operationContext, new AsyncCompletionHandler<Void>() {
+
                 @Override
                 public void completed(@Nullable final Void aVoid) {
-                    connectionInitializer.startHandshakeAsync(InternalStreamConnection.this,
+                    connectionInitializer.startHandshakeAsync(InternalStreamConnection.this, operationContext,
                             (initialResult, initialException) -> {
                                     if (initialException != null) {
                                         close();
@@ -257,7 +268,7 @@ public void completed(@Nullable final Void aVoid) {
                                         assertNotNull(initialResult);
                                         initAfterHandshakeStart(initialResult);
                                         connectionInitializer.finishHandshakeAsync(InternalStreamConnection.this,
-                                                initialResult, (completedResult, completedException) ->  {
+                                                initialResult, operationContext, (completedResult, completedException) ->  {
                                                         if (completedException != null) {
                                                             close();
                                                             callback.onResult(null, completedException);
@@ -360,46 +371,46 @@ public boolean isClosed() {
 
     @Nullable
     @Override
-    public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-                                final RequestContext requestContext, final OperationContext operationContext) {
-
+    public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
         Supplier<T> sendAndReceiveInternal = () -> sendAndReceiveInternal(
-                message, decoder, sessionContext, requestContext, operationContext);
+                message, decoder, operationContext);
         try {
             return sendAndReceiveInternal.get();
         } catch (MongoCommandException e) {
             if (reauthenticationIsTriggered(e)) {
-                return reauthenticateAndRetry(sendAndReceiveInternal);
+                return reauthenticateAndRetry(sendAndReceiveInternal, operationContext);
             }
             throw e;
         }
     }
 
     @Override
-    public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-            final RequestContext requestContext, final OperationContext operationContext, final SingleResultCallback<T> callback) {
+    public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder,
+                                        final OperationContext operationContext,
+                                        final SingleResultCallback<T> callback) {
 
         AsyncSupplier<T> sendAndReceiveAsyncInternal = c -> sendAndReceiveAsyncInternal(
-                message, decoder, sessionContext, requestContext, operationContext, c);
+                message, decoder, operationContext, c);
         beginAsync().<T>thenSupply(c -> {
             sendAndReceiveAsyncInternal.getAsync(c);
         }).onErrorIf(e -> reauthenticationIsTriggered(e), (t, c) -> {
-            reauthenticateAndRetryAsync(sendAndReceiveAsyncInternal, c);
+            reauthenticateAndRetryAsync(sendAndReceiveAsyncInternal, operationContext, c);
         }).finish(callback);
     }
 
-    private <T> T reauthenticateAndRetry(final Supplier<T> operation) {
+    private <T> T reauthenticateAndRetry(final Supplier<T> operation, final OperationContext operationContext) {
         authenticated.set(false);
-        assertNotNull(authenticator).reauthenticate(this);
+        assertNotNull(authenticator).reauthenticate(this, operationContext);
         authenticated.set(true);
         return operation.get();
     }
 
     private <T> void reauthenticateAndRetryAsync(final AsyncSupplier<T> operation,
+            final OperationContext operationContext,
             final SingleResultCallback<T> callback) {
         beginAsync().thenRun(c -> {
             authenticated.set(false);
-            assertNotNull(authenticator).reauthenticateAsync(this, c);
+            assertNotNull(authenticator).reauthenticateAsync(this, operationContext, c);
         }).<T>thenSupply((c) -> {
             authenticated.set(true);
             operation.getAsync(c);
@@ -419,15 +430,14 @@ public boolean reauthenticationIsTriggered(@Nullable final Throwable t) {
 
     @Nullable
     private <T> T sendAndReceiveInternal(final CommandMessage message, final Decoder<T> decoder,
-            final SessionContext sessionContext, final RequestContext requestContext,
             final OperationContext operationContext) {
         CommandEventSender commandEventSender;
         try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) {
-            message.encode(bsonOutput, sessionContext);
-            commandEventSender = createCommandEventSender(message, bsonOutput, requestContext, operationContext);
+            message.encode(bsonOutput, operationContext);
+            commandEventSender = createCommandEventSender(message, bsonOutput, operationContext);
             commandEventSender.sendStartedEvent();
             try {
-                sendCommandMessage(message, bsonOutput, sessionContext);
+                sendCommandMessage(message, bsonOutput, operationContext);
             } catch (Exception e) {
                 commandEventSender.sendFailedEvent(e);
                 throw e;
@@ -435,7 +445,7 @@ private <T> T sendAndReceiveInternal(final CommandMessage message, final Decoder
         }
 
         if (message.isResponseExpected()) {
-            return receiveCommandMessageResponse(decoder, commandEventSender, sessionContext, 0);
+            return receiveCommandMessageResponse(decoder, commandEventSender, operationContext);
         } else {
             commandEventSender.sendSucceededEventForOneWayCommand();
             return null;
@@ -443,10 +453,10 @@ private <T> T sendAndReceiveInternal(final CommandMessage message, final Decoder
     }
 
     @Override
-    public <T> void send(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext) {
+    public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
         try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) {
-            message.encode(bsonOutput, sessionContext);
-            sendCommandMessage(message, bsonOutput, sessionContext);
+            message.encode(bsonOutput, operationContext);
+            sendCommandMessage(message, bsonOutput, operationContext);
             if (message.isResponseExpected()) {
                 hasMoreToCome = true;
             }
@@ -454,15 +464,9 @@ public <T> void send(final CommandMessage message, final Decoder<T> decoder, fin
     }
 
     @Override
-    public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext) {
+    public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) {
         isTrue("Response is expected", hasMoreToCome);
-        return receiveCommandMessageResponse(decoder, new NoOpCommandEventSender(), sessionContext, 0);
-    }
-
-    @Override
-    public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext, final int additionalTimeout) {
-        isTrue("Response is expected", hasMoreToCome);
-        return receiveCommandMessageResponse(decoder, new NoOpCommandEventSender(), sessionContext, additionalTimeout);
+        return receiveCommandMessageResponse(decoder, new NoOpCommandEventSender(), operationContext);
     }
 
     @Override
@@ -470,56 +474,57 @@ public boolean hasMoreToCome() {
         return hasMoreToCome;
     }
 
-    private void sendCommandMessage(final CommandMessage message,
-                                    final ByteBufferBsonOutput bsonOutput, final SessionContext sessionContext) {
+    private void sendCommandMessage(final CommandMessage message, final ByteBufferBsonOutput bsonOutput,
+            final OperationContext operationContext) {
 
         Compressor localSendCompressor = sendCompressor;
         if (localSendCompressor == null || SECURITY_SENSITIVE_COMMANDS.contains(message.getCommandDocument(bsonOutput).getFirstKey())) {
-            List<ByteBuf> byteBuffers = bsonOutput.getByteBuffers();
-            try {
-                sendMessage(byteBuffers, message.getId());
-            } finally {
-                ResourceUtil.release(byteBuffers);
-                bsonOutput.close();
-            }
+            trySendMessage(message, bsonOutput, operationContext);
         } else {
             ByteBufferBsonOutput compressedBsonOutput;
             List<ByteBuf> byteBuffers = bsonOutput.getByteBuffers();
             try {
                 CompressedMessage compressedMessage = new CompressedMessage(message.getOpCode(), byteBuffers, localSendCompressor,
-                        getMessageSettings(description));
+                        getMessageSettings(description, initialServerDescription));
                 compressedBsonOutput = new ByteBufferBsonOutput(this);
-                compressedMessage.encode(compressedBsonOutput, sessionContext);
+                compressedMessage.encode(compressedBsonOutput, operationContext);
             } finally {
                 ResourceUtil.release(byteBuffers);
                 bsonOutput.close();
             }
-            List<ByteBuf> compressedByteBuffers = compressedBsonOutput.getByteBuffers();
-            try {
-                sendMessage(compressedByteBuffers, message.getId());
-            } finally {
-                ResourceUtil.release(compressedByteBuffers);
-                compressedBsonOutput.close();
-            }
+            trySendMessage(message, compressedBsonOutput, operationContext);
         }
         responseTo = message.getId();
     }
 
-    private <T> T receiveCommandMessageResponse(final Decoder<T> decoder,
-                                                final CommandEventSender commandEventSender, final SessionContext sessionContext,
-                                                final int additionalTimeout) {
+    private void trySendMessage(final CommandMessage message, final ByteBufferBsonOutput bsonOutput,
+            final OperationContext operationContext) {
+        Timeout.onExistsAndExpired(operationContext.getTimeoutContext().timeoutIncludingRoundTrip(), () -> {
+            throw TimeoutContext.createMongoRoundTripTimeoutException();
+        });
+        List<ByteBuf> byteBuffers = bsonOutput.getByteBuffers();
+        try {
+            sendMessage(byteBuffers, message.getId(), operationContext);
+        } finally {
+            ResourceUtil.release(byteBuffers);
+            bsonOutput.close();
+        }
+    }
+
+    private <T> T receiveCommandMessageResponse(final Decoder<T> decoder, final CommandEventSender commandEventSender,
+            final OperationContext operationContext) {
         boolean commandSuccessful = false;
-        try (ResponseBuffers responseBuffers = receiveMessageWithAdditionalTimeout(additionalTimeout)) {
-            updateSessionContext(sessionContext, responseBuffers);
+        try (ResponseBuffers responseBuffers = receiveResponseBuffers(operationContext)) {
+            updateSessionContext(operationContext.getSessionContext(), responseBuffers);
             if (!isCommandOk(responseBuffers)) {
                 throw getCommandFailureException(responseBuffers.getResponseDocument(responseTo,
-                        new BsonDocumentCodec()), description.getServerAddress());
+                        new BsonDocumentCodec()), description.getServerAddress(), operationContext.getTimeoutContext());
             }
 
             commandSuccessful = true;
             commandEventSender.sendSucceededEvent(responseBuffers);
 
-            T commandResult = getCommandResult(decoder, responseBuffers, responseTo);
+            T commandResult = getCommandResult(decoder, responseBuffers, responseTo, operationContext.getTimeoutContext());
             hasMoreToCome = responseBuffers.getReplyHeader().hasMoreToCome();
             if (hasMoreToCome) {
                 responseTo = responseBuffers.getReplyHeader().getRequestId();
@@ -536,8 +541,8 @@ private <T> T receiveCommandMessageResponse(final Decoder<T> decoder,
         }
     }
 
-    private <T> void sendAndReceiveAsyncInternal(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-            final RequestContext requestContext, final OperationContext operationContext, final SingleResultCallback<T> callback) {
+    private <T> void sendAndReceiveAsyncInternal(final CommandMessage message, final Decoder<T> decoder,
+                                                 final OperationContext operationContext, final SingleResultCallback<T> callback) {
         if (isClosed()) {
             callback.onResult(null, new MongoSocketClosedException("Can not read from a closed socket", getServerAddress()));
             return;
@@ -547,24 +552,24 @@ private <T> void sendAndReceiveAsyncInternal(final CommandMessage message, final
         ByteBufferBsonOutput compressedBsonOutput = new ByteBufferBsonOutput(this);
 
         try {
-            message.encode(bsonOutput, sessionContext);
-            CommandEventSender commandEventSender = createCommandEventSender(message, bsonOutput, requestContext, operationContext);
+            message.encode(bsonOutput, operationContext);
+            CommandEventSender commandEventSender = createCommandEventSender(message, bsonOutput, operationContext);
             commandEventSender.sendStartedEvent();
             Compressor localSendCompressor = sendCompressor;
             if (localSendCompressor == null || SECURITY_SENSITIVE_COMMANDS.contains(message.getCommandDocument(bsonOutput).getFirstKey())) {
-                sendCommandMessageAsync(message.getId(), decoder, sessionContext, callback, bsonOutput, commandEventSender,
+                sendCommandMessageAsync(message.getId(), decoder, operationContext, callback, bsonOutput, commandEventSender,
                         message.isResponseExpected());
             } else {
                 List<ByteBuf> byteBuffers = bsonOutput.getByteBuffers();
                 try {
                     CompressedMessage compressedMessage = new CompressedMessage(message.getOpCode(), byteBuffers, localSendCompressor,
-                            getMessageSettings(description));
-                    compressedMessage.encode(compressedBsonOutput, sessionContext);
+                            getMessageSettings(description, initialServerDescription));
+                    compressedMessage.encode(compressedBsonOutput, operationContext);
                 } finally {
                     ResourceUtil.release(byteBuffers);
                     bsonOutput.close();
                 }
-                sendCommandMessageAsync(message.getId(), decoder, sessionContext, callback, compressedBsonOutput, commandEventSender,
+                sendCommandMessageAsync(message.getId(), decoder, operationContext, callback, compressedBsonOutput, commandEventSender,
                         message.isResponseExpected());
             }
         } catch (Throwable t) {
@@ -574,11 +579,21 @@ private <T> void sendAndReceiveAsyncInternal(final CommandMessage message, final
         }
     }
 
-    private <T> void sendCommandMessageAsync(final int messageId, final Decoder<T> decoder, final SessionContext sessionContext,
+    private <T> void sendCommandMessageAsync(final int messageId, final Decoder<T> decoder, final OperationContext operationContext,
                                              final SingleResultCallback<T> callback, final ByteBufferBsonOutput bsonOutput,
                                              final CommandEventSender commandEventSender, final boolean responseExpected) {
         List<ByteBuf> byteBuffers = bsonOutput.getByteBuffers();
-        sendMessageAsync(byteBuffers, messageId, (result, t) -> {
+
+        boolean[] shouldReturn = {false};
+        Timeout.onExistsAndExpired(operationContext.getTimeoutContext().timeoutIncludingRoundTrip(), () -> {
+            callback.onResult(null, createMongoOperationTimeoutExceptionAndClose(commandEventSender));
+            shouldReturn[0] = true;
+        });
+        if (shouldReturn[0]) {
+            return;
+        }
+
+        sendMessageAsync(byteBuffers, messageId, operationContext, (result, t) -> {
             ResourceUtil.release(byteBuffers);
             bsonOutput.close();
             if (t != null) {
@@ -588,7 +603,7 @@ private <T> void sendCommandMessageAsync(final int messageId, final Decoder<T> d
                 commandEventSender.sendSucceededEventForOneWayCommand();
                 callback.onResult(null, null);
             } else {
-                readAsync(MESSAGE_HEADER_LENGTH, new MessageHeaderCallback((responseBuffers, t1) -> {
+                readAsync(MESSAGE_HEADER_LENGTH, operationContext, new MessageHeaderCallback(operationContext, (responseBuffers, t1) -> {
                     if (t1 != null) {
                         commandEventSender.sendFailedEvent(t1);
                         callback.onResult(null, t1);
@@ -596,20 +611,20 @@ private <T> void sendCommandMessageAsync(final int messageId, final Decoder<T> d
                     }
                     assertNotNull(responseBuffers);
                     try {
-                        updateSessionContext(sessionContext, responseBuffers);
+                        updateSessionContext(operationContext.getSessionContext(), responseBuffers);
                         boolean commandOk =
                                 isCommandOk(new BsonBinaryReader(new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer())));
                         responseBuffers.reset();
                         if (!commandOk) {
                             MongoException commandFailureException = getCommandFailureException(
                                     responseBuffers.getResponseDocument(messageId, new BsonDocumentCodec()),
-                                    description.getServerAddress());
+                                    description.getServerAddress(), operationContext.getTimeoutContext());
                             commandEventSender.sendFailedEvent(commandFailureException);
                             throw commandFailureException;
                         }
                         commandEventSender.sendSucceededEvent(responseBuffers);
 
-                        T result1 = getCommandResult(decoder, responseBuffers, messageId);
+                        T result1 = getCommandResult(decoder, responseBuffers, messageId, operationContext.getTimeoutContext());
                         callback.onResult(result1, null);
                     } catch (Throwable localThrowable) {
                         callback.onResult(null, localThrowable);
@@ -621,9 +636,24 @@ private <T> void sendCommandMessageAsync(final int messageId, final Decoder<T> d
         });
     }
 
-    private <T> T getCommandResult(final Decoder<T> decoder, final ResponseBuffers responseBuffers, final int messageId) {
+    private MongoOperationTimeoutException createMongoOperationTimeoutExceptionAndClose(final CommandEventSender commandEventSender) {
+        MongoOperationTimeoutException e = TimeoutContext.createMongoRoundTripTimeoutException();
+        close();
+        commandEventSender.sendFailedEvent(e);
+        return e;
+    }
+
+    private <T> T getCommandResult(final Decoder<T> decoder,
+                                   final ResponseBuffers responseBuffers,
+                                   final int messageId,
+                                   final TimeoutContext timeoutContext) {
         T result = new ReplyMessage<>(responseBuffers, decoder, messageId).getDocument();
-        MongoException writeConcernBasedError = createSpecialWriteConcernException(responseBuffers, description.getServerAddress());
+        MongoException writeConcernBasedError = createSpecialWriteConcernException(responseBuffers,
+                description.getServerAddress(),
+                timeoutContext);
+        if (writeConcernBasedError instanceof MongoOperationTimeoutException) {
+            throw writeConcernBasedError;
+        }
         if (writeConcernBasedError != null) {
             throw new MongoWriteConcernWithResponseException(writeConcernBasedError, result);
         }
@@ -631,21 +661,24 @@ private <T> T getCommandResult(final Decoder<T> decoder, final ResponseBuffers r
     }
 
     @Override
-    public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId) {
+    public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) {
         notNull("stream is open", stream);
         if (isClosed()) {
             throw new MongoSocketClosedException("Cannot write to a closed stream", getServerAddress());
         }
         try {
-            stream.write(byteBuffers);
+            stream.write(byteBuffers, operationContext);
         } catch (Exception e) {
             close();
-            throwTranslatedWriteException(e);
+            throwTranslatedWriteException(e, operationContext);
         }
     }
 
     @Override
-    public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId,
+    public void sendMessageAsync(
+            final List<ByteBuf> byteBuffers,
+            final int lastRequestId,
+            final OperationContext operationContext,
             final SingleResultCallback<Void> callback) {
         beginAsync().thenRun((c) -> {
             notNull("stream is open", stream);
@@ -654,34 +687,26 @@ public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequ
             }
             c.complete(c);
         }).thenRunTryCatchAsyncBlocks(c -> {
-            stream.writeAsync(byteBuffers, c.asHandler());
+            stream.writeAsync(byteBuffers, operationContext, c.asHandler());
         }, Exception.class, (e, c) -> {
             close();
-            throwTranslatedWriteException(e);
+            throwTranslatedWriteException(e, operationContext);
         }).finish(errorHandlingCallback(callback, LOGGER));
     }
 
     @Override
-    public ResponseBuffers receiveMessage(final int responseTo) {
+    public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) {
         assertNotNull(stream);
         if (isClosed()) {
             throw new MongoSocketClosedException("Cannot read from a closed stream", getServerAddress());
         }
 
-        return receiveMessageWithAdditionalTimeout(0);
-    }
-
-    private ResponseBuffers receiveMessageWithAdditionalTimeout(final int additionalTimeout) {
-        try {
-            return receiveResponseBuffers(additionalTimeout);
-        } catch (Throwable t) {
-            close();
-            throw translateReadException(t);
-        }
+        return receiveResponseBuffers(operationContext);
     }
 
     @Override
-    public void receiveMessageAsync(final int responseTo, final SingleResultCallback<ResponseBuffers> callback) {
+    public void receiveMessageAsync(final int responseTo, final OperationContext operationContext,
+            final SingleResultCallback<ResponseBuffers> callback) {
         assertNotNull(stream);
 
         if (isClosed()) {
@@ -689,7 +714,7 @@ public void receiveMessageAsync(final int responseTo, final SingleResultCallback
             return;
         }
 
-        readAsync(MESSAGE_HEADER_LENGTH, new MessageHeaderCallback((result, t) -> {
+        readAsync(MESSAGE_HEADER_LENGTH, operationContext, new MessageHeaderCallback(operationContext, (result, t) -> {
             if (t != null) {
                 close();
                 callback.onResult(null, t);
@@ -699,14 +724,14 @@ public void receiveMessageAsync(final int responseTo, final SingleResultCallback
         }));
     }
 
-    private void readAsync(final int numBytes, final SingleResultCallback<ByteBuf> callback) {
+    private void readAsync(final int numBytes, final OperationContext operationContext, final SingleResultCallback<ByteBuf> callback) {
         if (isClosed()) {
             callback.onResult(null, new MongoSocketClosedException("Cannot read from a closed stream", getServerAddress()));
             return;
         }
 
         try {
-            stream.readAsync(numBytes, new AsyncCompletionHandler<ByteBuf>() {
+            stream.readAsync(numBytes, operationContext, new AsyncCompletionHandler<ByteBuf>() {
                 @Override
                 public void completed(@Nullable final ByteBuf buffer) {
                     callback.onResult(buffer, null);
@@ -715,12 +740,12 @@ public void completed(@Nullable final ByteBuf buffer) {
                 @Override
                 public void failed(final Throwable t) {
                     close();
-                    callback.onResult(null, translateReadException(t));
+                    callback.onResult(null, translateReadException(t, operationContext));
                 }
             });
         } catch (Exception e) {
             close();
-            callback.onResult(null, translateReadException(e));
+            callback.onResult(null, translateReadException(e, operationContext));
         }
     }
 
@@ -744,25 +769,33 @@ private void updateSessionContext(final SessionContext sessionContext, final Res
         }
     }
 
-    private void throwTranslatedWriteException(final Throwable e) {
-        throw translateWriteException(e);
-    }
+    private void throwTranslatedWriteException(final Throwable e, final OperationContext operationContext) {
+        if (e instanceof MongoSocketWriteTimeoutException && operationContext.getTimeoutContext().hasTimeoutMS()) {
+            throw createMongoTimeoutException(e);
+        }
 
-    private MongoException translateWriteException(final Throwable e) {
         if (e instanceof MongoException) {
-            return (MongoException) e;
+            throw (MongoException) e;
         }
         Optional<MongoInterruptedException> interruptedException = translateInterruptedException(e, "Interrupted while sending message");
         if (interruptedException.isPresent()) {
-            return interruptedException.get();
+            throw interruptedException.get();
         } else if (e instanceof IOException) {
-            return new MongoSocketWriteException("Exception sending message", getServerAddress(), e);
+            throw new MongoSocketWriteException("Exception sending message", getServerAddress(), e);
         } else {
-            return new MongoInternalException("Unexpected exception", e);
+            throw new MongoInternalException("Unexpected exception", e);
         }
     }
 
-    private MongoException translateReadException(final Throwable e) {
+    private MongoException translateReadException(final Throwable e, final OperationContext operationContext) {
+        if (operationContext.getTimeoutContext().hasTimeoutMS()) {
+            if (e instanceof SocketTimeoutException) {
+                return createMongoTimeoutException(createReadTimeoutException((SocketTimeoutException) e));
+            } else if (e instanceof MongoSocketReadTimeoutException) {
+                return createMongoTimeoutException((e));
+            }
+        }
+
         if (e instanceof MongoException) {
             return (MongoException) e;
         }
@@ -770,7 +803,7 @@ private MongoException translateReadException(final Throwable e) {
         if (interruptedException.isPresent()) {
             return interruptedException.get();
         } else if (e instanceof SocketTimeoutException) {
-            return new MongoSocketReadTimeoutException("Timeout while receiving message", getServerAddress(), e);
+            return createReadTimeoutException((SocketTimeoutException) e);
         } else if (e instanceof IOException) {
             return new MongoSocketReadException("Exception receiving message", getServerAddress(), e);
         } else if (e instanceof RuntimeException) {
@@ -780,37 +813,47 @@ private MongoException translateReadException(final Throwable e) {
         }
     }
 
-    private ResponseBuffers receiveResponseBuffers(final int additionalTimeout) throws IOException {
-        ByteBuf messageHeaderBuffer = stream.read(MESSAGE_HEADER_LENGTH, additionalTimeout);
-        MessageHeader messageHeader;
-        try {
-            messageHeader = new MessageHeader(messageHeaderBuffer, description.getMaxMessageSize());
-        } finally {
-            messageHeaderBuffer.release();
-        }
+    private MongoSocketReadTimeoutException createReadTimeoutException(final SocketTimeoutException e) {
+        return new MongoSocketReadTimeoutException("Timeout while receiving message",
+                getServerAddress(), e);
+    }
 
-        ByteBuf messageBuffer = stream.read(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, additionalTimeout);
-        boolean releaseMessageBuffer = true;
+    private ResponseBuffers receiveResponseBuffers(final OperationContext operationContext) {
         try {
-            if (messageHeader.getOpCode() == OP_COMPRESSED.getValue()) {
-                CompressedHeader compressedHeader = new CompressedHeader(messageBuffer, messageHeader);
+            ByteBuf messageHeaderBuffer = stream.read(MESSAGE_HEADER_LENGTH, operationContext);
+            MessageHeader messageHeader;
+            try {
+                messageHeader = new MessageHeader(messageHeaderBuffer, description.getMaxMessageSize());
+            } finally {
+                messageHeaderBuffer.release();
+            }
 
-                Compressor compressor = getCompressor(compressedHeader);
+            ByteBuf messageBuffer = stream.read(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, operationContext);
+            boolean releaseMessageBuffer = true;
+            try {
+                if (messageHeader.getOpCode() == OP_COMPRESSED.getValue()) {
+                    CompressedHeader compressedHeader = new CompressedHeader(messageBuffer, messageHeader);
 
-                ByteBuf buffer = getBuffer(compressedHeader.getUncompressedSize());
-                compressor.uncompress(messageBuffer, buffer);
+                    Compressor compressor = getCompressor(compressedHeader);
 
-                buffer.flip();
-                return new ResponseBuffers(new ReplyHeader(buffer, compressedHeader), buffer);
-            } else {
-                ResponseBuffers responseBuffers = new ResponseBuffers(new ReplyHeader(messageBuffer, messageHeader), messageBuffer);
-                releaseMessageBuffer = false;
-                return responseBuffers;
-            }
-        } finally {
-            if (releaseMessageBuffer) {
-                messageBuffer.release();
+                    ByteBuf buffer = getBuffer(compressedHeader.getUncompressedSize());
+                    compressor.uncompress(messageBuffer, buffer);
+
+                    buffer.flip();
+                    return new ResponseBuffers(new ReplyHeader(buffer, compressedHeader), buffer);
+                } else {
+                    ResponseBuffers responseBuffers = new ResponseBuffers(new ReplyHeader(messageBuffer, messageHeader), messageBuffer);
+                    releaseMessageBuffer = false;
+                    return responseBuffers;
+                }
+            } finally {
+                if (releaseMessageBuffer) {
+                    messageBuffer.release();
+                }
             }
+        } catch (Throwable t) {
+            close();
+            throw translateReadException(t, operationContext);
         }
     }
 
@@ -829,9 +872,11 @@ public ByteBuf getBuffer(final int size) {
     }
 
     private class MessageHeaderCallback implements SingleResultCallback<ByteBuf> {
+        private final OperationContext operationContext;
         private final SingleResultCallback<ResponseBuffers> callback;
 
-        MessageHeaderCallback(final SingleResultCallback<ResponseBuffers> callback) {
+        MessageHeaderCallback(final OperationContext operationContext, final SingleResultCallback<ResponseBuffers> callback) {
+            this.operationContext = operationContext;
             this.callback = callback;
         }
 
@@ -844,7 +889,8 @@ public void onResult(@Nullable final ByteBuf result, @Nullable final Throwable t
             try {
                 assertNotNull(result);
                 MessageHeader messageHeader = new MessageHeader(result, description.getMaxMessageSize());
-                readAsync(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, new MessageCallback(messageHeader));
+                readAsync(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, operationContext,
+                        new MessageCallback(messageHeader));
             } catch (Throwable localThrowable) {
                 callback.onResult(null, localThrowable);
             } finally {
@@ -906,14 +952,14 @@ public void onResult(@Nullable final ByteBuf result, @Nullable final Throwable t
     private static final StructuredLogger COMMAND_PROTOCOL_LOGGER = new StructuredLogger("protocol.command");
 
     private CommandEventSender createCommandEventSender(final CommandMessage message, final ByteBufferBsonOutput bsonOutput,
-            final RequestContext requestContext, final OperationContext operationContext) {
+                                                        final OperationContext operationContext) {
         boolean listensOrLogs = commandListener != null || COMMAND_PROTOCOL_LOGGER.isRequired(DEBUG, getClusterId());
         if (!recordEverything && (isMonitoringConnection || !opened() || !authenticated.get() || !listensOrLogs)) {
             return new NoOpCommandEventSender();
         }
         return new LoggingCommandEventSender(
                 SECURITY_SENSITIVE_COMMANDS, SECURITY_SENSITIVE_HELLO_COMMANDS, description, commandListener,
-                requestContext, operationContext, message, bsonOutput,
+                operationContext, message, bsonOutput,
                 COMMAND_PROTOCOL_LOGGER, loggerSettings);
     }
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java
index d4858f3d973..ee509873e40 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java
@@ -50,6 +50,7 @@
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public class InternalStreamConnectionInitializer implements InternalConnectionInitializer {
+    private static final int INITIAL_MIN_RTT = 0;
     private final ClusterConnectionMode clusterConnectionMode;
     private final Authenticator authenticator;
     private final BsonDocument clientMetadataDocument;
@@ -71,29 +72,31 @@ public InternalStreamConnectionInitializer(final ClusterConnectionMode clusterCo
     }
 
     @Override
-    public InternalConnectionInitializationDescription startHandshake(final InternalConnection internalConnection) {
+    public InternalConnectionInitializationDescription startHandshake(final InternalConnection internalConnection,
+                                                                      final OperationContext operationContext) {
         notNull("internalConnection", internalConnection);
 
-        return initializeConnectionDescription(internalConnection);
+        return initializeConnectionDescription(internalConnection, operationContext);
     }
 
     public InternalConnectionInitializationDescription finishHandshake(final InternalConnection internalConnection,
-                                                                       final InternalConnectionInitializationDescription description) {
+                                                                       final InternalConnectionInitializationDescription description,
+                                                                       final OperationContext operationContext) {
         notNull("internalConnection", internalConnection);
         notNull("description", description);
         final ConnectionDescription connectionDescription = description.getConnectionDescription();
         if (Authenticator.shouldAuthenticate(authenticator, connectionDescription)) {
-            authenticator.authenticate(internalConnection, connectionDescription);
+            authenticator.authenticate(internalConnection, connectionDescription, operationContext);
         }
-        return completeConnectionDescriptionInitialization(internalConnection, description);
+        return completeConnectionDescriptionInitialization(internalConnection, description, operationContext);
     }
 
     @Override
-    public void startHandshakeAsync(final InternalConnection internalConnection,
+    public void startHandshakeAsync(final InternalConnection internalConnection, final OperationContext operationContext,
                                     final SingleResultCallback<InternalConnectionInitializationDescription> callback) {
         long startTime = System.nanoTime();
         executeCommandAsync("admin", createHelloCommand(authenticator, internalConnection), clusterConnectionMode, serverApi,
-                internalConnection, (helloResult, t) -> {
+                internalConnection, operationContext, (helloResult, t) -> {
                     if (t != null) {
                         callback.onResult(null, t instanceof MongoException ? mapHelloException((MongoException) t) : t);
                     } else {
@@ -106,32 +109,36 @@ public void startHandshakeAsync(final InternalConnection internalConnection,
     @Override
     public void finishHandshakeAsync(final InternalConnection internalConnection,
                                      final InternalConnectionInitializationDescription description,
+                                     final OperationContext operationContext,
                                      final SingleResultCallback<InternalConnectionInitializationDescription> callback) {
         ConnectionDescription connectionDescription = description.getConnectionDescription();
 
         if (!Authenticator.shouldAuthenticate(authenticator, connectionDescription)) {
-            completeConnectionDescriptionInitializationAsync(internalConnection, description, callback);
+            completeConnectionDescriptionInitializationAsync(internalConnection, description, operationContext, callback);
         } else {
-            authenticator.authenticateAsync(internalConnection, connectionDescription,
+            authenticator.authenticateAsync(internalConnection, connectionDescription, operationContext,
                     (result1, t1) -> {
                         if (t1 != null) {
                             callback.onResult(null, t1);
                         } else {
-                            completeConnectionDescriptionInitializationAsync(internalConnection, description, callback);
+                            completeConnectionDescriptionInitializationAsync(internalConnection, description, operationContext, callback);
                         }
                     });
         }
     }
 
-    private InternalConnectionInitializationDescription initializeConnectionDescription(final InternalConnection internalConnection) {
+    private InternalConnectionInitializationDescription initializeConnectionDescription(final InternalConnection internalConnection,
+            final OperationContext operationContext) {
         BsonDocument helloResult;
         BsonDocument helloCommandDocument = createHelloCommand(authenticator, internalConnection);
 
         long start = System.nanoTime();
         try {
-            helloResult = executeCommand("admin", helloCommandDocument, clusterConnectionMode, serverApi, internalConnection);
+            helloResult = executeCommand("admin", helloCommandDocument, clusterConnectionMode, serverApi, internalConnection, operationContext);
         } catch (MongoException e) {
             throw mapHelloException(e);
+        } finally {
+            operationContext.getTimeoutContext().resetMaintenanceTimeout();
         }
         setSpeculativeAuthenticateResponse(helloResult);
         return createInitializationDescription(helloResult, internalConnection, start);
@@ -154,7 +161,7 @@ private InternalConnectionInitializationDescription createInitializationDescript
                 helloResult);
         ServerDescription serverDescription =
                 createServerDescription(internalConnection.getDescription().getServerAddress(), helloResult,
-                        System.nanoTime() - startTime);
+                        System.nanoTime() - startTime, INITIAL_MIN_RTT);
         return new InternalConnectionInitializationDescription(connectionDescription, serverDescription);
     }
 
@@ -191,7 +198,8 @@ private BsonDocument createHelloCommand(final Authenticator authenticator, final
 
     private InternalConnectionInitializationDescription completeConnectionDescriptionInitialization(
             final InternalConnection internalConnection,
-            final InternalConnectionInitializationDescription description) {
+            final InternalConnectionInitializationDescription description,
+            final OperationContext operationContext) {
 
         if (description.getConnectionDescription().getConnectionId().getServerValue() != null) {
             return description;
@@ -199,7 +207,7 @@ private InternalConnectionInitializationDescription completeConnectionDescriptio
 
         return applyGetLastErrorResult(executeCommandWithoutCheckingForFailure("admin",
                 new BsonDocument("getlasterror", new BsonInt32(1)), clusterConnectionMode, serverApi,
-                internalConnection),
+                internalConnection, operationContext),
                 description);
     }
 
@@ -213,6 +221,7 @@ private void setSpeculativeAuthenticateResponse(final BsonDocument helloResult)
     private void completeConnectionDescriptionInitializationAsync(
             final InternalConnection internalConnection,
             final InternalConnectionInitializationDescription description,
+            final OperationContext operationContext,
             final SingleResultCallback<InternalConnectionInitializationDescription> callback) {
 
         if (description.getConnectionDescription().getConnectionId().getServerValue() != null) {
@@ -221,7 +230,7 @@ private void completeConnectionDescriptionInitializationAsync(
         }
 
         executeCommandAsync("admin", new BsonDocument("getlasterror", new BsonInt32(1)), clusterConnectionMode, serverApi,
-                internalConnection,
+                internalConnection, operationContext,
                 (result, t) -> {
                     if (t != null) {
                         callback.onResult(description, null);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java
index efc6c4bfb47..ba47236cf4f 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java
@@ -18,6 +18,8 @@
 
 import com.mongodb.MongoClientException;
 import com.mongodb.MongoException;
+import com.mongodb.MongoInterruptedException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoTimeoutException;
 import com.mongodb.ServerAddress;
 import com.mongodb.annotations.ThreadSafe;
@@ -35,9 +37,11 @@
 import com.mongodb.event.ClusterOpeningEvent;
 import com.mongodb.event.ServerDescriptionChangedEvent;
 import com.mongodb.internal.Locks;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.selector.ServerSelector;
 
@@ -60,11 +64,9 @@
 import static com.mongodb.internal.connection.BaseCluster.logServerSelectionStarted;
 import static com.mongodb.internal.connection.BaseCluster.logServerSelectionSucceeded;
 import static com.mongodb.internal.event.EventListenerHelper.singleClusterListener;
-import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
 import static java.lang.String.format;
 import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonList;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 @ThreadSafe
@@ -181,9 +183,11 @@ public ClusterId getClusterId() {
     }
 
     @Override
-    public ServersSnapshot getServersSnapshot() {
+    public ServersSnapshot getServersSnapshot(
+            final Timeout serverSelectionTimeout,
+            final TimeoutContext timeoutContext) {
         isTrue("open", !isClosed());
-        waitForSrv();
+        waitForSrv(serverSelectionTimeout, timeoutContext);
         ClusterableServer server = assertNotNull(this.server);
         return serverAddress -> server;
     }
@@ -203,36 +207,32 @@ public ClusterClock getClock() {
     @Override
     public ServerTuple selectServer(final ServerSelector serverSelector, final OperationContext operationContext) {
         isTrue("open", !isClosed());
-        waitForSrv();
+        Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout();
+        waitForSrv(computedServerSelectionTimeout, operationContext.getTimeoutContext());
         if (srvRecordResolvedToMultipleHosts) {
             throw createResolvedToMultipleHostsException();
         }
         ClusterDescription curDescription = description;
-        logServerSelectionStarted(clusterId, operationContext, serverSelector, curDescription);
+        logServerSelectionStarted(clusterId, operationContext.getId(), serverSelector, curDescription);
         ServerTuple serverTuple = new ServerTuple(assertNotNull(server), curDescription.getServerDescriptions().get(0));
-        logServerSelectionSucceeded(clusterId, operationContext, serverTuple.getServerDescription().getAddress(), serverSelector, curDescription);
+        logServerSelectionSucceeded(clusterId, operationContext.getId(), serverTuple.getServerDescription().getAddress(),
+                serverSelector, curDescription);
         return serverTuple;
     }
 
-
-    private void waitForSrv() {
+    private void waitForSrv(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) {
         if (initializationCompleted) {
             return;
         }
         Locks.withLock(lock, () -> {
-            long remainingTimeNanos = getMaxWaitTimeNanos();
             while (!initializationCompleted) {
                 if (isClosed()) {
                     throw createShutdownException();
                 }
-                if (remainingTimeNanos <= 0) {
-                    throw createTimeoutException();
-                }
-                try {
-                    remainingTimeNanos = condition.awaitNanos(remainingTimeNanos);
-                } catch (InterruptedException e) {
-                    throw interruptAndCreateMongoInterruptedException(format("Interrupted while resolving SRV records for %s", settings.getSrvHost()), e);
-                }
+                serverSelectionTimeout.onExpired(() -> {
+                    throw createTimeoutException(timeoutContext);
+                });
+                serverSelectionTimeout.awaitOn(condition, () -> format("resolving SRV records for %s", settings.getSrvHost()));
             }
         });
     }
@@ -244,9 +244,9 @@ public void selectServerAsync(final ServerSelector serverSelector, final Operati
             callback.onResult(null, createShutdownException());
             return;
         }
-
-        ServerSelectionRequest serverSelectionRequest = new ServerSelectionRequest(
-                operationContext, serverSelector, getMaxWaitTimeNanos(), callback);
+        Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout();
+        ServerSelectionRequest serverSelectionRequest = new ServerSelectionRequest(operationContext.getId(), serverSelector,
+                operationContext, computedServerSelectionTimeout, callback);
         if (initializationCompleted) {
             handleServerSelectionRequest(serverSelectionRequest);
         } else {
@@ -298,9 +298,9 @@ private void handleServerSelectionRequest(final ServerSelectionRequest serverSel
         } else {
             ClusterDescription curDescription = description;
             logServerSelectionStarted(
-                    clusterId, serverSelectionRequest.operationContext, serverSelectionRequest.serverSelector, curDescription);
+                    clusterId, serverSelectionRequest.operationId, serverSelectionRequest.serverSelector, curDescription);
             ServerTuple serverTuple = new ServerTuple(assertNotNull(server), curDescription.getServerDescriptions().get(0));
-            logServerSelectionSucceeded(clusterId, serverSelectionRequest.operationContext,
+            logServerSelectionSucceeded(clusterId, serverSelectionRequest.operationId,
                     serverTuple.getServerDescription().getAddress(), serverSelectionRequest.serverSelector, curDescription);
             serverSelectionRequest.onSuccess(serverTuple);
         }
@@ -311,23 +311,20 @@ private MongoClientException createResolvedToMultipleHostsException() {
                 + "to multiple hosts");
     }
 
-    private MongoTimeoutException createTimeoutException() {
+    private MongoTimeoutException createTimeoutException(final TimeoutContext timeoutContext) {
         MongoException localSrvResolutionException = srvResolutionException;
+        String message;
         if (localSrvResolutionException == null) {
-            return new MongoTimeoutException(format("Timed out after %d ms while waiting to resolve SRV records for %s.",
-                    settings.getServerSelectionTimeout(MILLISECONDS), settings.getSrvHost()));
+            message = format("Timed out while waiting to resolve SRV records for %s.", settings.getSrvHost());
         } else {
-            return new MongoTimeoutException(format("Timed out after %d ms while waiting to resolve SRV records for %s. "
-                            + "Resolution exception was '%s'",
-                    settings.getServerSelectionTimeout(MILLISECONDS), settings.getSrvHost(), localSrvResolutionException));
+            message = format("Timed out while waiting to resolve SRV records for %s. "
+                    + "Resolution exception was '%s'", settings.getSrvHost(), localSrvResolutionException);
         }
+        return createTimeoutException(timeoutContext, message);
     }
 
-    private long getMaxWaitTimeNanos() {
-        if (settings.getServerSelectionTimeout(NANOSECONDS) < 0) {
-            return Long.MAX_VALUE;
-        }
-        return settings.getServerSelectionTimeout(NANOSECONDS);
+    private static MongoTimeoutException createTimeoutException(final TimeoutContext timeoutContext, final String message) {
+        return timeoutContext.hasTimeoutMS() ? new MongoOperationTimeoutException(message) : new MongoTimeoutException(message);
     }
 
     private void notifyWaitQueueHandler(final ServerSelectionRequest request) {
@@ -362,32 +359,35 @@ public void run() {
                     if (isClosed() || initializationCompleted) {
                         break;
                     }
-                    long waitTimeNanos = Long.MAX_VALUE;
-                    long curTimeNanos = System.nanoTime();
+                    Timeout waitTimeNanos = Timeout.infinite();
 
                     for (Iterator<ServerSelectionRequest> iterator = waitQueue.iterator(); iterator.hasNext();) {
                         ServerSelectionRequest next = iterator.next();
-                        long remainingTime = next.getRemainingTime(curTimeNanos);
-                        if (remainingTime <= 0) {
-                            timeoutList.add(next);
-                            iterator.remove();
-                        } else {
-                            waitTimeNanos = Math.min(remainingTime, waitTimeNanos);
-                        }
+
+                        Timeout nextTimeout = next.getTimeout();
+                        Timeout waitTimeNanosFinal = waitTimeNanos;
+                        waitTimeNanos = nextTimeout.call(NANOSECONDS,
+                                () -> Timeout.earliest(waitTimeNanosFinal, nextTimeout),
+                                (ns) -> Timeout.earliest(waitTimeNanosFinal, nextTimeout),
+                                () -> {
+                                    timeoutList.add(next);
+                                    iterator.remove();
+                                    return waitTimeNanosFinal;
+                                });
                     }
                     if (timeoutList.isEmpty()) {
                         try {
-                            //noinspection ResultOfMethodCallIgnored
-                            condition.await(waitTimeNanos, NANOSECONDS);
-                        } catch (InterruptedException unexpected) {
+                            waitTimeNanos.awaitOn(condition, () -> "ignored");
+                        } catch (MongoInterruptedException unexpected) {
                             fail();
                         }
                     }
                 } finally {
                     lock.unlock();
                 }
-
-                timeoutList.forEach(request -> request.onError(createTimeoutException()));
+                timeoutList.forEach(request -> request.onError(createTimeoutException(request
+                        .getOperationContext()
+                        .getTimeoutContext())));
                 timeoutList.clear();
             }
 
@@ -405,24 +405,27 @@ public void run() {
     }
 
     private static final class ServerSelectionRequest {
-        private final OperationContext operationContext;
+        private final long operationId;
         private final ServerSelector serverSelector;
-        private final long maxWaitTimeNanos;
-        private final long startTimeNanos = System.nanoTime();
         private final SingleResultCallback<ServerTuple> callback;
+        private final Timeout timeout;
+        private final OperationContext operationContext;
 
-        private ServerSelectionRequest(
-                final OperationContext operationContext,
-                final ServerSelector serverSelector,
-                final long maxWaitTimeNanos, final SingleResultCallback<ServerTuple> callback) {
-            this.operationContext = operationContext;
+        private ServerSelectionRequest(final long operationId, final ServerSelector serverSelector, final OperationContext operationContext,
+                                       final Timeout timeout, final SingleResultCallback<ServerTuple> callback) {
+            this.operationId = operationId;
             this.serverSelector = serverSelector;
-            this.maxWaitTimeNanos = maxWaitTimeNanos;
+            this.timeout = timeout;
+            this.operationContext = operationContext;
             this.callback = callback;
         }
 
-        long getRemainingTime(final long curTimeNanos) {
-            return startTimeNanos + maxWaitTimeNanos - curTimeNanos;
+        Timeout getTimeout() {
+            return timeout;
+        }
+
+        OperationContext getOperationContext() {
+            return operationContext;
         }
 
         public void onSuccess(final ServerTuple serverTuple) {
diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java
index 0521e094cb1..bcd86fa5205 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java
@@ -51,6 +51,7 @@ public class LoadBalancedClusterableServerFactory implements ClusterableServerFa
     private final MongoDriverInformation mongoDriverInformation;
     private final List<MongoCompressor> compressorList;
     private final ServerApi serverApi;
+    private final InternalOperationContextFactory operationContextFactory;
 
     public LoadBalancedClusterableServerFactory(final ServerSettings serverSettings,
             final ConnectionPoolSettings connectionPoolSettings,
@@ -59,7 +60,8 @@ public LoadBalancedClusterableServerFactory(final ServerSettings serverSettings,
             final LoggerSettings loggerSettings,
             @Nullable final CommandListener commandListener,
             @Nullable final String applicationName, final MongoDriverInformation mongoDriverInformation,
-            final List<MongoCompressor> compressorList, @Nullable final ServerApi serverApi) {
+            final List<MongoCompressor> compressorList, @Nullable final ServerApi serverApi,
+            final InternalOperationContextFactory operationContextFactory) {
         this.serverSettings = serverSettings;
         this.connectionPoolSettings = connectionPoolSettings;
         this.internalConnectionPoolSettings = internalConnectionPoolSettings;
@@ -71,6 +73,7 @@ public LoadBalancedClusterableServerFactory(final ServerSettings serverSettings,
         this.mongoDriverInformation = mongoDriverInformation;
         this.compressorList = compressorList;
         this.serverApi = serverApi;
+        this.operationContextFactory = operationContextFactory;
     }
 
     @Override
@@ -78,7 +81,7 @@ public ClusterableServer create(final Cluster cluster, final ServerAddress serve
         ConnectionPool connectionPool = new DefaultConnectionPool(new ServerId(cluster.getClusterId(), serverAddress),
                 new InternalStreamConnectionFactory(ClusterConnectionMode.LOAD_BALANCED, streamFactory, credential, applicationName,
                         mongoDriverInformation, compressorList, loggerSettings, commandListener, serverApi),
-                connectionPoolSettings, internalConnectionPoolSettings, EmptyProvider.instance());
+                connectionPoolSettings, internalConnectionPoolSettings, EmptyProvider.instance(), operationContextFactory);
         connectionPool.ready();
 
         return new LoadBalancedServer(new ServerId(cluster.getClusterId(), serverAddress), connectionPool, new DefaultConnectionFactory(),
diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java
index f55bd5c93dc..3820810ab9f 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java
@@ -154,13 +154,13 @@ ConnectionPool getConnectionPool() {
         return connectionPool;
     }
 
-    private class LoadBalancedServerProtocolExecutor implements ProtocolExecutor {
+    private class LoadBalancedServerProtocolExecutor extends AbstractProtocolExecutor {
         @SuppressWarnings("unchecked")
         @Override
         public <T> T execute(final CommandProtocol<T> protocol, final InternalConnection connection, final SessionContext sessionContext) {
             try {
-                protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock));
-                return protocol.execute(connection);
+                return protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock))
+                        .execute(connection);
             } catch (MongoWriteConcernWithResponseException e) {
                 return (T) e.getResponse();
             } catch (MongoException e) {
@@ -173,8 +173,8 @@ public <T> T execute(final CommandProtocol<T> protocol, final InternalConnection
         @Override
         public <T> void executeAsync(final CommandProtocol<T> protocol, final InternalConnection connection,
                                      final SessionContext sessionContext, final SingleResultCallback<T> callback) {
-            protocol.sessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock));
-            protocol.executeAsync(connection, errorHandlingCallback((result, t) -> {
+            protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock))
+                    .executeAsync(connection, errorHandlingCallback((result, t) -> {
                 if (t != null) {
                     if (t instanceof MongoWriteConcernWithResponseException) {
                         callback.onResult((T) ((MongoWriteConcernWithResponseException) t).getResponse(), null);
@@ -191,7 +191,7 @@ public <T> void executeAsync(final CommandProtocol<T> protocol, final InternalCo
         private void handleExecutionException(final InternalConnection connection, final SessionContext sessionContext,
                                               final Throwable t) {
             invalidate(t, connection.getDescription().getServiceId(), connection.getGeneration());
-            if (t instanceof MongoSocketException && sessionContext.hasSession()) {
+            if (shouldMarkSessionDirty(t, sessionContext)) {
                 sessionContext.markSessionDirty();
             }
         }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java b/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java
index 6215bc8b98a..3821ca947c6 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java
@@ -18,7 +18,6 @@
 
 import com.mongodb.LoggerSettings;
 import com.mongodb.MongoCommandException;
-import com.mongodb.RequestContext;
 import com.mongodb.connection.ClusterId;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.event.CommandListener;
@@ -36,7 +35,6 @@
 import org.bson.json.JsonWriterSettings;
 
 import java.io.StringWriter;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
@@ -66,7 +64,6 @@ class LoggingCommandEventSender implements CommandEventSender {
     private final ConnectionDescription description;
     @Nullable
     private final CommandListener commandListener;
-    private final RequestContext requestContext;
     private final OperationContext operationContext;
     private final StructuredLogger logger;
     private final LoggerSettings loggerSettings;
@@ -78,12 +75,14 @@ class LoggingCommandEventSender implements CommandEventSender {
 
     LoggingCommandEventSender(final Set<String> securitySensitiveCommands, final Set<String> securitySensitiveHelloCommands,
             final ConnectionDescription description,
-            @Nullable final CommandListener commandListener, final RequestContext requestContext, final OperationContext operationContext,
-            final CommandMessage message, final ByteBufferBsonOutput bsonOutput, final StructuredLogger logger,
+            @Nullable final CommandListener commandListener,
+            final OperationContext operationContext,
+            final CommandMessage message,
+            final ByteBufferBsonOutput bsonOutput,
+            final StructuredLogger logger,
             final LoggerSettings loggerSettings) {
         this.description = description;
         this.commandListener = commandListener;
-        this.requestContext = requestContext;
         this.operationContext = operationContext;
         this.logger = logger;
         this.loggerSettings = loggerSettings;
@@ -113,7 +112,7 @@ public void sendStartedEvent() {
                     ? new BsonDocument() : commandDocument;
 
             sendCommandStartedEvent(message, message.getNamespace().getDatabaseName(), commandName, commandDocumentForEvent, description,
-                    assertNotNull(commandListener), requestContext, operationContext);
+                    assertNotNull(commandListener), operationContext);
         }
         // the buffer underlying the command document may be released after the started event, so set to null to ensure it's not used
         // when sending the failed or succeeded event
@@ -142,8 +141,8 @@ public void sendFailedEvent(final Throwable t) {
         }
 
         if (eventRequired()) {
-            sendCommandFailedEvent(message, message.getNamespace().getDatabaseName(), commandName, description, elapsedTimeNanos,
-                    commandEventException, commandListener, requestContext, operationContext);
+            sendCommandFailedEvent(message, commandName, message.getNamespace().getDatabaseName(), description, elapsedTimeNanos,
+                    commandEventException, commandListener, operationContext);
         }
     }
 
@@ -179,8 +178,8 @@ private void sendSucceededEvent(final BsonDocument reply) {
 
         if (eventRequired()) {
             BsonDocument responseDocumentForEvent = redactionRequired ? new BsonDocument() : reply;
-            sendCommandSucceededEvent(message, message.getNamespace().getDatabaseName(), commandName, responseDocumentForEvent, description,
-                    elapsedTimeNanos, commandListener, requestContext, operationContext);
+            sendCommandSucceededEvent(message, commandName, message.getNamespace().getDatabaseName(), responseDocumentForEvent,
+                    description, elapsedTimeNanos, commandListener, operationContext);
         }
     }
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java
index 3157635febf..7a5734bc140 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java
@@ -49,6 +49,7 @@ public final class MessageSettings {
     private final int maxWireVersion;
     private final ServerType serverType;
     private final boolean sessionSupported;
+    private final boolean cryptd;
 
     /**
      * Gets the builder
@@ -70,6 +71,7 @@ public static final class Builder {
         private int maxWireVersion;
         private ServerType serverType;
         private boolean sessionSupported;
+        private boolean cryptd;
 
         /**
          * Build it.
@@ -127,6 +129,17 @@ public Builder sessionSupported(final boolean sessionSupported) {
             this.sessionSupported = sessionSupported;
             return this;
         }
+
+        /**
+         * Set whether the server is a mongocryptd.
+         *
+         * @param cryptd true if the server is a mongocryptd.
+         * @return this
+         */
+        public Builder cryptd(final boolean cryptd) {
+            this.cryptd = cryptd;
+            return this;
+        }
     }
 
     /**
@@ -163,6 +176,9 @@ public int getMaxWireVersion() {
     public ServerType getServerType() {
         return serverType;
     }
+    public boolean isCryptd() {
+        return cryptd;
+    }
 
     public boolean isSessionSupported() {
         return sessionSupported;
@@ -176,5 +192,6 @@ private MessageSettings(final Builder builder) {
         this.maxWireVersion = builder.maxWireVersion;
         this.serverType = builder.serverType;
         this.sessionSupported = builder.sessionSupported;
+        this.cryptd = builder.cryptd;
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java
index 164d93aac9c..3d778ae0349 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java
@@ -226,31 +226,35 @@ static OidcCallback getGcpCallback(final MongoCredential credential) {
     }
 
     @Override
-    public void reauthenticate(final InternalConnection connection) {
+    public void reauthenticate(final InternalConnection connection, final OperationContext operationContext) {
         assertTrue(connection.opened());
-        authenticationLoop(connection, connection.getDescription());
+        authenticationLoop(connection, connection.getDescription(), operationContext);
     }
 
     @Override
-    public void reauthenticateAsync(final InternalConnection connection, final SingleResultCallback<Void> callback) {
+    public void reauthenticateAsync(final InternalConnection connection,
+                                    final OperationContext operationContext,
+                                    final SingleResultCallback<Void> callback) {
         beginAsync().thenRun(c -> {
             assertTrue(connection.opened());
-            authenticationLoopAsync(connection, connection.getDescription(), c);
+            authenticationLoopAsync(connection, connection.getDescription(), operationContext, c);
         }).finish(callback);
     }
 
     @Override
-    public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) {
+    public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription,
+                             final OperationContext operationContext) {
         assertFalse(connection.opened());
-        authenticationLoop(connection, connectionDescription);
+        authenticationLoop(connection, connectionDescription, operationContext);
     }
 
     @Override
     void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription,
+            final OperationContext operationContext,
             final SingleResultCallback<Void> callback) {
         beginAsync().thenRun(c -> {
             assertFalse(connection.opened());
-            authenticationLoopAsync(connection, connectionDescription, c);
+            authenticationLoopAsync(connection, connectionDescription, operationContext, c);
         }).finish(callback);
     }
 
@@ -266,11 +270,12 @@ private static boolean triggersRetry(@Nullable final Throwable t) {
         return false;
     }
 
-    private void authenticationLoop(final InternalConnection connection, final ConnectionDescription description) {
+    private void authenticationLoop(final InternalConnection connection, final ConnectionDescription description,
+                                    final OperationContext operationContext) {
         fallbackState = FallbackState.INITIAL;
         while (true) {
             try {
-                super.authenticate(connection, description);
+                super.authenticate(connection, description, operationContext);
                 break;
                 } catch (Exception e) {
                 if (triggersRetry(e) && shouldRetryHandler()) {
@@ -282,10 +287,12 @@ private void authenticationLoop(final InternalConnection connection, final Conne
     }
 
     private void authenticationLoopAsync(final InternalConnection connection, final ConnectionDescription description,
+            final OperationContext operationContext,
             final SingleResultCallback<Void> callback) {
         fallbackState = FallbackState.INITIAL;
         beginAsync().thenRunRetryingWhile(
-                c -> super.authenticateAsync(connection, description, c),
+                operationContext.getTimeoutContext(),
+                c -> super.authenticateAsync(connection, description, operationContext, c),
                 e -> triggersRetry(e) && shouldRetryHandler()
         ).finish(callback);
     }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java
index 683f6adfbf8..bf29ebc051b 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java
@@ -16,10 +16,17 @@
 package com.mongodb.internal.connection;
 
 import com.mongodb.MongoConnectionPoolClearedException;
+import com.mongodb.RequestContext;
 import com.mongodb.ServerAddress;
+import com.mongodb.ServerApi;
 import com.mongodb.connection.ClusterDescription;
 import com.mongodb.connection.ClusterType;
 import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.IgnorableRequestContext;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
+import com.mongodb.internal.VisibleForTesting;
+import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import com.mongodb.selector.ServerSelector;
 
@@ -27,6 +34,7 @@
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
+
 import static java.util.stream.Collectors.toList;
 
 /**
@@ -36,16 +44,93 @@ public class OperationContext {
     private static final AtomicLong NEXT_ID = new AtomicLong(0);
     private final long id;
     private final ServerDeprioritization serverDeprioritization;
+    private final SessionContext sessionContext;
+    private final RequestContext requestContext;
+    private final TimeoutContext timeoutContext;
+    @Nullable
+    private final ServerApi serverApi;
+
+    public OperationContext(final RequestContext requestContext, final SessionContext sessionContext, final TimeoutContext timeoutContext,
+            @Nullable final ServerApi serverApi) {
+        this(NEXT_ID.incrementAndGet(), requestContext, sessionContext, timeoutContext, new ServerDeprioritization(), serverApi);
+    }
+
+    public static OperationContext simpleOperationContext(
+            final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) {
+        return new OperationContext(
+                IgnorableRequestContext.INSTANCE,
+                NoOpSessionContext.INSTANCE,
+                new TimeoutContext(timeoutSettings),
+                serverApi);
+    }
+
+    public static OperationContext simpleOperationContext(final TimeoutContext timeoutContext) {
+        return new OperationContext(
+                IgnorableRequestContext.INSTANCE,
+                NoOpSessionContext.INSTANCE,
+                timeoutContext,
+                null);
+    }
+
+    public OperationContext withSessionContext(final SessionContext sessionContext) {
+        return new OperationContext(id, requestContext, sessionContext, timeoutContext, serverDeprioritization, serverApi);
+    }
 
-    public OperationContext() {
-        id = NEXT_ID.incrementAndGet();
-        serverDeprioritization = new ServerDeprioritization();
+    public OperationContext withTimeoutContext(final TimeoutContext timeoutContext) {
+        return new OperationContext(id, requestContext, sessionContext, timeoutContext, serverDeprioritization, serverApi);
     }
 
     public long getId() {
         return id;
     }
 
+    public SessionContext getSessionContext() {
+        return sessionContext;
+    }
+
+    public RequestContext getRequestContext() {
+        return requestContext;
+    }
+
+    public TimeoutContext getTimeoutContext() {
+        return timeoutContext;
+    }
+
+    @Nullable
+    public ServerApi getServerApi() {
+        return serverApi;
+    }
+
+    @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE)
+    public OperationContext(final long id,
+                            final RequestContext requestContext,
+                            final SessionContext sessionContext,
+                            final TimeoutContext timeoutContext,
+                            final ServerDeprioritization serverDeprioritization,
+                            @Nullable final ServerApi serverApi) {
+        this.id = id;
+        this.serverDeprioritization = serverDeprioritization;
+        this.requestContext = requestContext;
+        this.sessionContext = sessionContext;
+        this.timeoutContext = timeoutContext;
+        this.serverApi = serverApi;
+    }
+
+    @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE)
+    public OperationContext(final long id,
+                            final RequestContext requestContext,
+                            final SessionContext sessionContext,
+                            final TimeoutContext timeoutContext,
+                            @Nullable final ServerApi serverApi) {
+        this.id = id;
+        this.serverDeprioritization = new ServerDeprioritization();
+        this.requestContext = requestContext;
+        this.sessionContext = sessionContext;
+        this.timeoutContext = timeoutContext;
+        this.serverApi = serverApi;
+    }
+
+
     /**
      * @return The same {@link ServerDeprioritization} if called on the same {@link OperationContext}.
      */
@@ -114,3 +199,4 @@ private boolean isEnabled(final ClusterType clusterType) {
         }
     }
 }
+
diff --git a/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java
index 23287362502..c6ad5f451a0 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java
@@ -26,11 +26,13 @@
 import com.mongodb.RequestContext;
 import com.mongodb.ServerAddress;
 import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerDescription;
 import com.mongodb.event.CommandFailedEvent;
 import com.mongodb.event.CommandListener;
 import com.mongodb.event.CommandStartedEvent;
 import com.mongodb.event.CommandSucceededEvent;
 import com.mongodb.internal.IgnorableRequestContext;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
 import com.mongodb.lang.Nullable;
@@ -83,12 +85,14 @@ static boolean isCommandOk(final ResponseBuffers responseBuffers) {
     }
 
     @Nullable
-    static MongoException createSpecialWriteConcernException(final ResponseBuffers responseBuffers, final ServerAddress serverAddress) {
+    static MongoException createSpecialWriteConcernException(final ResponseBuffers responseBuffers,
+                                                             final ServerAddress serverAddress,
+                                                             final TimeoutContext timeoutContext) {
         BsonValue writeConcernError = getField(createBsonReader(responseBuffers), "writeConcernError");
         if (writeConcernError == null) {
             return null;
         } else {
-            return createSpecialException(writeConcernError.asDocument(), serverAddress, "errmsg");
+            return createSpecialException(writeConcernError.asDocument(), serverAddress, "errmsg", timeoutContext);
         }
     }
 
@@ -197,8 +201,9 @@ private static boolean isCommandOk(@Nullable final BsonValue okValue) {
         }
     }
 
-    static MongoException getCommandFailureException(final BsonDocument response, final ServerAddress serverAddress) {
-        MongoException specialException = createSpecialException(response, serverAddress, "errmsg");
+    static MongoException getCommandFailureException(final BsonDocument response, final ServerAddress serverAddress,
+                                                     final TimeoutContext timeoutContext) {
+        MongoException specialException = createSpecialException(response, serverAddress, "errmsg", timeoutContext);
         if (specialException != null) {
             return specialException;
         }
@@ -213,15 +218,16 @@ static String getErrorMessage(final BsonDocument response, final String errorMes
         return response.getString(errorMessageFieldName, new BsonString("")).getValue();
     }
 
-    static MongoException getQueryFailureException(final BsonDocument errorDocument, final ServerAddress serverAddress) {
-        MongoException specialException = createSpecialException(errorDocument, serverAddress, "$err");
+    static MongoException getQueryFailureException(final BsonDocument errorDocument, final ServerAddress serverAddress,
+                                                   final TimeoutContext timeoutContext) {
+        MongoException specialException = createSpecialException(errorDocument, serverAddress, "$err", timeoutContext);
         if (specialException != null) {
             return specialException;
         }
         return new MongoQueryException(errorDocument, serverAddress);
     }
 
-    static MessageSettings getMessageSettings(final ConnectionDescription connectionDescription) {
+    static MessageSettings getMessageSettings(final ConnectionDescription connectionDescription, final ServerDescription serverDescription) {
         return MessageSettings.builder()
                 .maxDocumentSize(connectionDescription.getMaxDocumentSize())
                 .maxMessageSize(connectionDescription.getMaxMessageSize())
@@ -229,6 +235,7 @@ static MessageSettings getMessageSettings(final ConnectionDescription connection
                 .maxWireVersion(connectionDescription.getMaxWireVersion())
                 .serverType(connectionDescription.getServerType())
                 .sessionSupported(connectionDescription.getLogicalSessionTimeoutMinutes() != null)
+                .cryptd(serverDescription.isCryptd())
                 .build();
     }
 
@@ -238,22 +245,28 @@ static MessageSettings getMessageSettings(final ConnectionDescription connection
     private static final List<String> RECOVERING_MESSAGES = asList("not master or secondary", "node is recovering");
 
     @Nullable
-    public static MongoException createSpecialException(@Nullable final BsonDocument response, final ServerAddress serverAddress,
-                                                        final String errorMessageFieldName) {
+    public static MongoException createSpecialException(@Nullable final BsonDocument response,
+                                                        final ServerAddress serverAddress,
+                                                        final String errorMessageFieldName,
+                                                        final TimeoutContext timeoutContext) {
         if (response == null) {
             return null;
         }
         int errorCode = getErrorCode(response);
         String errorMessage = getErrorMessage(response, errorMessageFieldName);
         if (ErrorCategory.fromErrorCode(errorCode) == ErrorCategory.EXECUTION_TIMEOUT) {
-            return new MongoExecutionTimeoutException(errorCode, errorMessage, response);
+            MongoExecutionTimeoutException mongoExecutionTimeoutException = new MongoExecutionTimeoutException(errorCode, errorMessage, response);
+            if (timeoutContext.hasTimeoutMS()) {
+                return TimeoutContext.createMongoTimeoutException(mongoExecutionTimeoutException);
+            }
+            return mongoExecutionTimeoutException;
         } else if (isNodeIsRecoveringError(errorCode, errorMessage)) {
             return new MongoNodeIsRecoveringException(response, serverAddress);
         } else if (isNotPrimaryError(errorCode, errorMessage)) {
             return new MongoNotPrimaryException(response, serverAddress);
         } else if (response.containsKey("writeConcernError")) {
             MongoException writeConcernException = createSpecialException(response.getDocument("writeConcernError"), serverAddress,
-                    "errmsg");
+                    "errmsg", timeoutContext);
             if (writeConcernException != null && response.isArray("errorLabels")) {
                 for (BsonValue errorLabel : response.getArray("errorLabels")) {
                     writeConcernException.addLabel(errorLabel.asString().getValue());
@@ -277,11 +290,11 @@ private static boolean isNodeIsRecoveringError(final int errorCode, final String
 
     static void sendCommandStartedEvent(final RequestMessage message, final String databaseName, final String commandName,
             final BsonDocument command, final ConnectionDescription connectionDescription,
-            final CommandListener commandListener, final RequestContext requestContext, final OperationContext operationContext) {
-        notNull("requestContext", requestContext);
+            final CommandListener commandListener, final OperationContext operationContext) {
+        notNull("operationContext", operationContext);
         try {
-            commandListener.commandStarted(new CommandStartedEvent(getRequestContextForEvent(requestContext), operationContext.getId(), message.getId(),
-                    connectionDescription, databaseName, commandName, command));
+            commandListener.commandStarted(new CommandStartedEvent(getRequestContextForEvent(operationContext.getRequestContext()),
+                    operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, command));
         } catch (Exception e) {
             if (PROTOCOL_EVENT_LOGGER.isWarnEnabled()) {
                 PROTOCOL_EVENT_LOGGER.warn(format("Exception thrown raising command started event to listener %s", commandListener), e);
@@ -289,12 +302,13 @@ static void sendCommandStartedEvent(final RequestMessage message, final String d
         }
     }
 
-    static void sendCommandSucceededEvent(final RequestMessage message, final String databaseName, final String commandName,
+    static void sendCommandSucceededEvent(final RequestMessage message, final String commandName, final String databaseName,
             final BsonDocument response, final ConnectionDescription connectionDescription, final long elapsedTimeNanos,
-            final CommandListener commandListener, final RequestContext requestContext, final OperationContext operationContext) {
-        notNull("requestContext", requestContext);
+            final CommandListener commandListener, final OperationContext operationContext) {
+        notNull("operationContext", operationContext);
         try {
-            commandListener.commandSucceeded(new CommandSucceededEvent(getRequestContextForEvent(requestContext),
+
+            commandListener.commandSucceeded(new CommandSucceededEvent(getRequestContextForEvent(operationContext.getRequestContext()),
                     operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, response,
                     elapsedTimeNanos));
         } catch (Exception e) {
@@ -304,15 +318,15 @@ static void sendCommandSucceededEvent(final RequestMessage message, final String
         }
     }
 
-    static void sendCommandFailedEvent(final RequestMessage message, final String databaseName, final String commandName,
+    static void sendCommandFailedEvent(final RequestMessage message, final String commandName, final String databaseName,
             final ConnectionDescription connectionDescription, final long elapsedTimeNanos,
-            final Throwable throwable, final CommandListener commandListener, final RequestContext requestContext,
-            final OperationContext operationContext) {
-        notNull("requestContext", requestContext);
+            final Throwable throwable, final CommandListener commandListener, final OperationContext operationContext) {
+        notNull("operationContext", operationContext);
         try {
-            commandListener.commandFailed(new CommandFailedEvent(getRequestContextForEvent(requestContext),
+            commandListener.commandFailed(new CommandFailedEvent(getRequestContextForEvent(operationContext.getRequestContext()),
                     operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, elapsedTimeNanos,
                     throwable));
+
         } catch (Exception e) {
             if (PROTOCOL_EVENT_LOGGER.isWarnEnabled()) {
                 PROTOCOL_EVENT_LOGGER.warn(format("Exception thrown raising command failed event to listener %s", commandListener), e);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java
index f170cafdb00..86e2ebd1dbe 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java
@@ -16,7 +16,6 @@
 
 package com.mongodb.internal.connection;
 
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinaryWriter;
 import org.bson.BsonBinaryWriterSettings;
@@ -127,13 +126,13 @@ public MessageSettings getSettings() {
      * Encoded the message to the given output.
      *
      * @param bsonOutput the output
-     * @param sessionContext the session context
+     * @param operationContext the operation context
      */
-    public void encode(final BsonOutput bsonOutput, final SessionContext sessionContext) {
-        notNull("sessionContext", sessionContext);
+    public void encode(final BsonOutput bsonOutput, final OperationContext operationContext) {
+        notNull("operationContext", operationContext);
         int messageStartPosition = bsonOutput.getPosition();
         writeMessagePrologue(bsonOutput);
-        EncodingMetadata encodingMetadata = encodeMessageBodyWithMetadata(bsonOutput, sessionContext);
+        EncodingMetadata encodingMetadata = encodeMessageBodyWithMetadata(bsonOutput, operationContext);
         backpatchMessageLength(messageStartPosition, bsonOutput);
         this.encodingMetadata = encodingMetadata;
     }
@@ -163,10 +162,10 @@ protected void writeMessagePrologue(final BsonOutput bsonOutput) {
      * Encode the message body to the given output.
      *
      * @param bsonOutput the output
-     * @param sessionContext the session context
+     * @param operationContext the operation context
      * @return the encoding metadata
      */
-    protected abstract EncodingMetadata encodeMessageBodyWithMetadata(BsonOutput bsonOutput, SessionContext sessionContext);
+    protected abstract EncodingMetadata encodeMessageBodyWithMetadata(BsonOutput bsonOutput, OperationContext operationContext);
 
     protected void addDocument(final BsonDocument document, final BsonOutput bsonOutput,
                                final FieldNameValidator validator, @Nullable final List<BsonElement> extraElements) {
diff --git a/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java b/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java
new file mode 100644
index 00000000000..ffba2caecc4
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.annotations.ThreadSafe;
+
+import java.util.Deque;
+import java.util.concurrent.ConcurrentLinkedDeque;
+
+final class RoundTripTimeSampler {
+    private final ExponentiallyWeightedMovingAverage averageRoundTripTime = new ExponentiallyWeightedMovingAverage(0.2);
+    private final RecentSamples recentSamples = new RecentSamples();
+
+    void reset() {
+        averageRoundTripTime.reset();
+        recentSamples.reset();
+    }
+
+    void addSample(final long sample) {
+        recentSamples.add(sample);
+        averageRoundTripTime.addSample(sample);
+    }
+
+    long getAverage() {
+        return averageRoundTripTime.getAverage();
+    }
+
+    long getMin() {
+        return recentSamples.min();
+    }
+
+    @ThreadSafe
+    private static final class RecentSamples {
+
+        private static final int MAX_SIZE = 10;
+        private final Deque<Long> samples;
+
+        RecentSamples() {
+            samples = new ConcurrentLinkedDeque<>();
+        }
+
+        void add(final long sample) {
+            if (samples.size() == MAX_SIZE) {
+                samples.removeFirst();
+            }
+            samples.add(sample);
+        }
+
+        void reset() {
+            samples.clear();
+        }
+
+        long min() {
+            // Clients MUST report the minimum RTT as 0 until at least 2 samples have been gathered
+            return samples.size() < 2 ? 0 : samples.stream().min(Long::compareTo).orElse(0L);
+        }
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java
index 6e4bea55514..900d9a14e16 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java
@@ -20,6 +20,7 @@
 import com.mongodb.MongoCredential;
 import com.mongodb.MongoException;
 import com.mongodb.MongoInterruptedException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoSecurityException;
 import com.mongodb.ServerAddress;
 import com.mongodb.ServerApi;
@@ -61,13 +62,13 @@ abstract class SaslAuthenticator extends Authenticator implements SpeculativeAut
         super(credential, clusterConnectionMode, serverApi);
     }
 
-    @Override
-    public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) {
+    public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription,
+                             final OperationContext operationContext) {
         doAsSubject(() -> {
             SaslClient saslClient = createSaslClient(connection.getDescription().getServerAddress());
             throwIfSaslClientIsNull(saslClient);
             try {
-                BsonDocument responseDocument = getNextSaslResponse(saslClient, connection);
+                BsonDocument responseDocument = getNextSaslResponse(saslClient, connection, operationContext);
                 BsonInt32 conversationId = responseDocument.getInt32("conversationId");
 
                 while (!(responseDocument.getBoolean("done")).getValue()) {
@@ -79,7 +80,8 @@ public void authenticate(final InternalConnection connection, final ConnectionDe
                                         + getMongoCredential());
                     }
 
-                    responseDocument = sendSaslContinue(conversationId, response, connection);
+                    responseDocument = sendSaslContinue(conversationId, response, connection, operationContext);
+                    operationContext.getTimeoutContext().resetMaintenanceTimeout();
                 }
                 if (!saslClient.isComplete()) {
                     saslClient.evaluateChallenge((responseDocument.getBinary("payload")).getData());
@@ -100,12 +102,12 @@ public void authenticate(final InternalConnection connection, final ConnectionDe
 
     @Override
     void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription,
-                           final SingleResultCallback<Void> callback) {
+            final OperationContext operationContext, final SingleResultCallback<Void> callback) {
         try {
             doAsSubject(() -> {
                 SaslClient saslClient = createSaslClient(connection.getDescription().getServerAddress());
                 throwIfSaslClientIsNull(saslClient);
-                getNextSaslResponseAsync(saslClient, connection, callback);
+                getNextSaslResponseAsync(saslClient, connection, operationContext, callback);
                 return null;
             });
         } catch (Throwable t) {
@@ -127,7 +129,8 @@ private void throwIfSaslClientIsNull(@Nullable final SaslClient saslClient) {
         }
     }
 
-    private BsonDocument getNextSaslResponse(final SaslClient saslClient, final InternalConnection connection) {
+    private BsonDocument getNextSaslResponse(final SaslClient saslClient, final InternalConnection connection,
+                                             final OperationContext operationContext) {
         BsonDocument response = connection.opened() ? null : getSpeculativeAuthenticateResponse();
         if (response != null) {
             return response;
@@ -135,20 +138,20 @@ private BsonDocument getNextSaslResponse(final SaslClient saslClient, final Inte
 
         try {
             byte[] serverResponse = saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(new byte[0]) : null;
-            return sendSaslStart(serverResponse, connection);
+            return sendSaslStart(serverResponse, connection, operationContext);
         } catch (Exception e) {
             throw wrapException(e);
         }
     }
 
     private void getNextSaslResponseAsync(final SaslClient saslClient, final InternalConnection connection,
-                                          final SingleResultCallback<Void> callback) {
+            final OperationContext operationContext, final SingleResultCallback<Void> callback) {
         SingleResultCallback<Void> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
         try {
             BsonDocument response = connection.opened() ? null : getSpeculativeAuthenticateResponse();
             if (response == null) {
                 byte[] serverResponse = (saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(new byte[0]) : null);
-                sendSaslStartAsync(serverResponse, connection, (result, t) -> {
+                sendSaslStartAsync(serverResponse, connection, operationContext, (result, t) -> {
                     if (t != null) {
                         errHandlingCallback.onResult(null, wrapException(t));
                         return;
@@ -157,13 +160,13 @@ private void getNextSaslResponseAsync(final SaslClient saslClient, final Interna
                     if (result.getBoolean("done").getValue()) {
                         verifySaslClientComplete(saslClient, result, errHandlingCallback);
                     } else {
-                        new Continuator(saslClient, result, connection, errHandlingCallback).start();
+                        new Continuator(saslClient, result, connection, operationContext, errHandlingCallback).start();
                     }
                 });
             } else if (response.getBoolean("done").getValue()) {
                 verifySaslClientComplete(saslClient, response, errHandlingCallback);
             } else {
-                new Continuator(saslClient, response, connection, errHandlingCallback).start();
+                new Continuator(saslClient, response, connection, operationContext, errHandlingCallback).start();
             }
         } catch (Exception e) {
             callback.onResult(null, wrapException(e));
@@ -225,29 +228,47 @@ protected SubjectProvider getDefaultSubjectProvider() {
         return () -> null;
     }
 
-    private BsonDocument sendSaslStart(@Nullable final byte[] outToken, final InternalConnection connection) {
+    private BsonDocument sendSaslStart(@Nullable final byte[] outToken, final InternalConnection connection,
+            final OperationContext operationContext) {
         BsonDocument startDocument = createSaslStartCommandDocument(outToken);
         appendSaslStartOptions(startDocument);
-        return executeCommand(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection);
+        try {
+            return executeCommand(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection,
+                    operationContext);
+        } finally {
+            operationContext.getTimeoutContext().resetMaintenanceTimeout();
+        }
     }
 
-    private BsonDocument sendSaslContinue(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection) {
-        return executeCommand(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken),
-                getClusterConnectionMode(), getServerApi(), connection);
+    private BsonDocument sendSaslContinue(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection,
+            final OperationContext operationContext) {
+        try {
+            return executeCommand(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken),
+                    getClusterConnectionMode(), getServerApi(), connection, operationContext);
+        } finally {
+            operationContext.getTimeoutContext().resetMaintenanceTimeout();
+        }
     }
 
     private void sendSaslStartAsync(@Nullable final byte[] outToken, final InternalConnection connection,
-                                    final SingleResultCallback<BsonDocument> callback) {
+            final OperationContext operationContext, final SingleResultCallback<BsonDocument> callback) {
         BsonDocument startDocument = createSaslStartCommandDocument(outToken);
         appendSaslStartOptions(startDocument);
+
         executeCommandAsync(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection,
-                callback);
+                operationContext, (r, t) -> {
+                    operationContext.getTimeoutContext().resetMaintenanceTimeout();
+                    callback.onResult(r, t);
+                });
     }
 
     private void sendSaslContinueAsync(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection,
-                                       final SingleResultCallback<BsonDocument> callback) {
+            final OperationContext operationContext, final SingleResultCallback<BsonDocument> callback) {
         executeCommandAsync(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken),
-                getClusterConnectionMode(), getServerApi(), connection, callback);
+                getClusterConnectionMode(), getServerApi(), connection, operationContext, (r, t) -> {
+                    operationContext.getTimeoutContext().resetMaintenanceTimeout();
+                    callback.onResult(r, t);
+                });
     }
 
     protected BsonDocument createSaslStartCommandDocument(@Nullable final byte[] outToken) {
@@ -271,6 +292,8 @@ private void disposeOfSaslClient(final SaslClient saslClient) {
     protected MongoException wrapException(final Throwable t) {
         if (t instanceof MongoInterruptedException) {
             return (MongoInterruptedException) t;
+        } else if (t instanceof MongoOperationTimeoutException) {
+            return (MongoOperationTimeoutException) t;
         } else if (t instanceof MongoSecurityException) {
             return (MongoSecurityException) t;
         } else {
@@ -300,13 +323,15 @@ private final class Continuator implements SingleResultCallback<BsonDocument> {
         private final SaslClient saslClient;
         private final BsonDocument saslStartDocument;
         private final InternalConnection connection;
+        private final OperationContext operationContext;
         private final SingleResultCallback<Void> callback;
 
         Continuator(final SaslClient saslClient, final BsonDocument saslStartDocument, final InternalConnection connection,
-                    final SingleResultCallback<Void> callback) {
+                    final OperationContext operationContext, final SingleResultCallback<Void> callback) {
             this.saslClient = saslClient;
             this.saslStartDocument = saslStartDocument;
             this.connection = connection;
+            this.operationContext = operationContext;
             this.callback = callback;
         }
 
@@ -335,13 +360,13 @@ private void continueConversation(final BsonDocument result) {
                 doAsSubject(() -> {
                     try {
                         sendSaslContinueAsync(saslStartDocument.getInt32("conversationId"),
-                                saslClient.evaluateChallenge((result.getBinary("payload")).getData()), connection, Continuator.this);
+                                saslClient.evaluateChallenge((result.getBinary("payload")).getData()), connection,
+                                operationContext, Continuator.this);
                     } catch (SaslException e) {
                         throw wrapException(e);
                     }
                     return null;
                 });
-
             } catch (Throwable t) {
                 callback.onResult(null, t);
                 disposeOfSaslClient(saslClient);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java
index 3c9d3b126bf..daeb67be54d 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java
@@ -24,9 +24,11 @@
 import com.mongodb.connection.ClusterType;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.connection.ServerType;
+import com.mongodb.event.ServerDescriptionChangedEvent;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
-import com.mongodb.event.ServerDescriptionChangedEvent;
+import com.mongodb.internal.time.Timeout;
 
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -68,7 +70,9 @@ protected void connect() {
     }
 
     @Override
-    public ServersSnapshot getServersSnapshot() {
+    public ServersSnapshot getServersSnapshot(
+            final Timeout serverSelectionTimeout,
+            final TimeoutContext timeoutContext) {
         isTrue("open", !isClosed());
         ClusterableServer server = assertNotNull(this.server.get());
         return serverAddress -> server;
diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java b/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java
index 7ee08fd967c..a1c3ed0d914 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java
@@ -38,15 +38,14 @@
 import java.net.SocketTimeoutException;
 import java.util.Iterator;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.assertTrue;
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutContext.throwMongoTimeoutException;
 import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses;
 import static com.mongodb.internal.connection.SocketStreamHelper.configureSocket;
 import static com.mongodb.internal.connection.SslHelper.configureSslSocket;
 import static com.mongodb.internal.thread.InterruptionUtil.translateInterruptedException;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
@@ -75,9 +74,9 @@ public SocketStream(final ServerAddress address, final InetAddressResolver inetA
     }
 
     @Override
-    public void open() {
+    public void open(final OperationContext operationContext) {
         try {
-            socket = initializeSocket();
+            socket = initializeSocket(operationContext);
             outputStream = socket.getOutputStream();
             inputStream = socket.getInputStream();
         } catch (IOException e) {
@@ -87,22 +86,22 @@ public void open() {
         }
     }
 
-    protected Socket initializeSocket() throws IOException {
+    protected Socket initializeSocket(final OperationContext operationContext) throws IOException {
         ProxySettings proxySettings = settings.getProxySettings();
         if (proxySettings.isProxyEnabled()) {
             if (sslSettings.isEnabled()) {
                 assertTrue(socketFactory instanceof SSLSocketFactory);
                 SSLSocketFactory sslSocketFactory = (SSLSocketFactory) socketFactory;
-                return initializeSslSocketOverSocksProxy(sslSocketFactory);
+                return initializeSslSocketOverSocksProxy(operationContext, sslSocketFactory);
             }
-            return initializeSocketOverSocksProxy();
+            return initializeSocketOverSocksProxy(operationContext);
         }
 
         Iterator<InetSocketAddress> inetSocketAddresses = getSocketAddresses(address, inetAddressResolver).iterator();
         while (inetSocketAddresses.hasNext()) {
             Socket socket = socketFactory.createSocket();
             try {
-                SocketStreamHelper.initialize(socket, inetSocketAddresses.next(), settings, sslSettings);
+                SocketStreamHelper.initialize(operationContext, socket, inetSocketAddresses.next(), settings, sslSettings);
                 return socket;
             } catch (SocketTimeoutException e) {
                 if (!inetSocketAddresses.hasNext()) {
@@ -114,14 +113,15 @@ protected Socket initializeSocket() throws IOException {
         throw new MongoSocketException("Exception opening socket", getAddress());
     }
 
-    private SSLSocket initializeSslSocketOverSocksProxy(final SSLSocketFactory sslSocketFactory) throws IOException {
+    private SSLSocket initializeSslSocketOverSocksProxy(final OperationContext operationContext,
+            final SSLSocketFactory sslSocketFactory) throws IOException {
         final String serverHost = address.getHost();
         final int serverPort = address.getPort();
 
         SocksSocket socksProxy = new SocksSocket(settings.getProxySettings());
-        configureSocket(socksProxy, settings);
+        configureSocket(socksProxy, operationContext, settings);
         InetSocketAddress inetSocketAddress = toSocketAddress(serverHost, serverPort);
-        socksProxy.connect(inetSocketAddress, settings.getConnectTimeout(MILLISECONDS));
+        socksProxy.connect(inetSocketAddress, operationContext.getTimeoutContext().getConnectTimeoutMs());
 
         SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket(socksProxy, serverHost, serverPort, true);
         //Even though Socks proxy connection is already established, TLS handshake has not been performed yet.
@@ -139,9 +139,9 @@ private static InetSocketAddress toSocketAddress(final String serverHost, final
         return InetSocketAddress.createUnresolved(serverHost, serverPort);
     }
 
-    private Socket initializeSocketOverSocksProxy() throws IOException {
+    private Socket initializeSocketOverSocksProxy(final OperationContext operationContext) throws IOException {
         Socket createdSocket = socketFactory.createSocket();
-        configureSocket(createdSocket, settings);
+        configureSocket(createdSocket, operationContext, settings);
         /*
           Wrap the configured socket with SocksSocket to add extra functionality.
           Reason for separate steps: We can't directly extend Java 11 methods within 'SocksSocket'
@@ -150,7 +150,7 @@ private Socket initializeSocketOverSocksProxy() throws IOException {
         SocksSocket socksProxy = new SocksSocket(createdSocket, settings.getProxySettings());
 
         socksProxy.connect(toSocketAddress(address.getHost(), address.getPort()),
-                settings.getConnectTimeout(TimeUnit.MILLISECONDS));
+                operationContext.getTimeoutContext().getConnectTimeoutMs());
         return socksProxy;
     }
 
@@ -160,60 +160,58 @@ public ByteBuf getBuffer(final int size) {
     }
 
     @Override
-    public void write(final List<ByteBuf> buffers) throws IOException {
+    public void write(final List<ByteBuf> buffers, final OperationContext operationContext) throws IOException {
         for (final ByteBuf cur : buffers) {
             outputStream.write(cur.array(), 0, cur.limit());
+            operationContext.getTimeoutContext().onExpired(() -> {
+                throwMongoTimeoutException("Socket write exceeded the timeout limit.");
+            });
         }
     }
 
     @Override
-    public ByteBuf read(final int numBytes) throws IOException {
-        ByteBuf buffer = bufferProvider.getBuffer(numBytes);
+    public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException {
         try {
-            int totalBytesRead = 0;
-            byte[] bytes = buffer.array();
-            while (totalBytesRead < buffer.limit()) {
-                int bytesRead = inputStream.read(bytes, totalBytesRead, buffer.limit() - totalBytesRead);
-                if (bytesRead == -1) {
-                    throw new MongoSocketReadException("Prematurely reached end of stream", getAddress());
+            ByteBuf buffer = bufferProvider.getBuffer(numBytes);
+            try {
+                int totalBytesRead = 0;
+                byte[] bytes = buffer.array();
+                while (totalBytesRead < buffer.limit()) {
+                    int readTimeoutMS = (int) operationContext.getTimeoutContext().getReadTimeoutMS();
+                    socket.setSoTimeout(readTimeoutMS);
+                    int bytesRead = inputStream.read(bytes, totalBytesRead, buffer.limit() - totalBytesRead);
+                    if (bytesRead == -1) {
+                        throw new MongoSocketReadException("Prematurely reached end of stream", getAddress());
+                    }
+                    totalBytesRead += bytesRead;
                 }
-                totalBytesRead += bytesRead;
+                return buffer;
+            } catch (Exception e) {
+                buffer.release();
+                throw e;
             }
-            return buffer;
-        } catch (Exception e) {
-            buffer.release();
-            throw e;
-        }
-    }
-
-    @Override
-    public ByteBuf read(final int numBytes, final int additionalTimeout) throws IOException {
-        int curTimeout = socket.getSoTimeout();
-        if (curTimeout > 0 && additionalTimeout > 0) {
-            socket.setSoTimeout(curTimeout + additionalTimeout);
-        }
-        try {
-            return read(numBytes);
         } finally {
             if (!socket.isClosed()) {
                 // `socket` may be closed if the current thread is virtual, and it is interrupted while reading
-                socket.setSoTimeout(curTimeout);
+                socket.setSoTimeout(0);
             }
         }
     }
 
     @Override
-    public void openAsync(final AsyncCompletionHandler<Void> handler) {
+    public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler<Void> handler) {
         throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations.");
     }
 
     @Override
-    public void writeAsync(final List<ByteBuf> buffers, final AsyncCompletionHandler<Void> handler) {
+    public void writeAsync(final List<ByteBuf> buffers, final OperationContext operationContext,
+            final AsyncCompletionHandler<Void> handler) {
         throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations.");
     }
 
     @Override
-    public void readAsync(final int numBytes, final AsyncCompletionHandler<ByteBuf> handler) {
+    public void readAsync(final int numBytes, final OperationContext operationContext,
+            final AsyncCompletionHandler<ByteBuf> handler) {
         throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations.");
     }
 
diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java
index 1b5e789e646..74098c4ede6 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java
@@ -28,7 +28,6 @@
 import java.net.SocketOption;
 
 import static com.mongodb.internal.connection.SslHelper.configureSslSocket;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 @SuppressWarnings({"unchecked", "rawtypes"})
 final class SocketStreamHelper {
@@ -69,17 +68,21 @@ final class SocketStreamHelper {
         SET_OPTION_METHOD = setOptionMethod;
     }
 
-    static void initialize(final Socket socket, final InetSocketAddress inetSocketAddress, final SocketSettings settings,
-                           final SslSettings sslSettings) throws IOException {
-        configureSocket(socket, settings);
+    static void initialize(final OperationContext operationContext, final Socket socket,
+            final InetSocketAddress inetSocketAddress, final SocketSettings settings,
+            final SslSettings sslSettings) throws IOException {
+        configureSocket(socket, operationContext, settings);
         configureSslSocket(socket, sslSettings, inetSocketAddress);
-        socket.connect(inetSocketAddress, settings.getConnectTimeout(MILLISECONDS));
+        socket.connect(inetSocketAddress, operationContext.getTimeoutContext().getConnectTimeoutMs());
     }
 
-    static void configureSocket(final Socket socket, final SocketSettings settings) throws SocketException {
+    static void configureSocket(final Socket socket, final OperationContext operationContext, final SocketSettings settings) throws SocketException {
         socket.setTcpNoDelay(true);
-        socket.setSoTimeout(settings.getReadTimeout(MILLISECONDS));
         socket.setKeepAlive(true);
+        int readTimeoutMS = (int) operationContext.getTimeoutContext().getReadTimeoutMS();
+        if (readTimeoutMS > 0) {
+            socket.setSoTimeout(readTimeoutMS);
+        }
 
         // Adding keep alive options for users of Java 11+. These options will be ignored for older Java versions.
         setExtendedSocketOptions(socket);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java b/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java
index 3b4eac7b48e..8a0152c9423 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java
@@ -32,7 +32,6 @@
 import java.nio.channels.SocketChannel;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.assertFalse;
 import static com.mongodb.assertions.Assertions.assertNotNull;
@@ -44,6 +43,8 @@
 import static com.mongodb.internal.connection.SocksSocket.AddressType.IP_V4;
 import static com.mongodb.internal.connection.SocksSocket.AddressType.IP_V6;
 import static com.mongodb.internal.connection.SocksSocket.ServerReply.REPLY_SUCCEEDED;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
@@ -84,17 +85,18 @@ public void connect(final SocketAddress endpoint, final int timeoutMs) throws IO
         // `Socket` requires `IllegalArgumentException`
         isTrueArgument("timeoutMs", timeoutMs >= 0);
         try {
-            Timeout timeout = toTimeout(timeoutMs);
+            Timeout timeout = Timeout.expiresIn(timeoutMs, MILLISECONDS, ZERO_DURATION_MEANS_INFINITE);
             InetSocketAddress unresolvedAddress = (InetSocketAddress) endpoint;
             assertTrue(unresolvedAddress.isUnresolved());
             this.remoteAddress = unresolvedAddress;
 
             InetSocketAddress proxyAddress = new InetSocketAddress(assertNotNull(proxySettings.getHost()), proxySettings.getPort());
-            if (socket != null) {
-                socket.connect(proxyAddress, remainingMillis(timeout));
-            } else {
-                super.connect(proxyAddress, remainingMillis(timeout));
-            }
+
+            timeout.checkedRun(MILLISECONDS,
+                    () -> socketConnect(proxyAddress, 0),
+                    (ms) -> socketConnect(proxyAddress, Math.toIntExact(ms)),
+                    () -> throwSocketConnectionTimeout());
+
             SocksAuthenticationMethod authenticationMethod = performNegotiation(timeout);
             authenticate(authenticationMethod, timeout);
             sendConnect(timeout);
@@ -114,6 +116,14 @@ public void connect(final SocketAddress endpoint, final int timeoutMs) throws IO
         }
     }
 
+    private void socketConnect(final InetSocketAddress proxyAddress, final int rem) throws IOException {
+        if (socket != null) {
+            socket.connect(proxyAddress, rem);
+        } else {
+            super.connect(proxyAddress, rem);
+        }
+    }
+
     private void sendConnect(final Timeout timeout) throws IOException {
         final String host = remoteAddress.getHostName();
         final int port = remoteAddress.getPort();
@@ -292,26 +302,6 @@ private SocksAuthenticationMethod[] getSocksAuthenticationMethods() {
         return authMethods;
     }
 
-    private static Timeout toTimeout(final int timeoutMs) {
-        if (timeoutMs == 0) {
-            return Timeout.infinite();
-        }
-        return Timeout.startNow(timeoutMs, TimeUnit.MILLISECONDS);
-    }
-
-    private static int remainingMillis(final Timeout timeout) throws IOException {
-        if (timeout.isInfinite()) {
-            return 0;
-        }
-
-        final int remaining = Math.toIntExact(timeout.remaining(TimeUnit.MILLISECONDS));
-        if (remaining > 0) {
-            return remaining;
-        }
-
-        throw new SocketTimeoutException("Socket connection timed out");
-    }
-
     private byte[] readSocksReply(final int length, final Timeout timeout) throws IOException {
         InputStream inputStream = getInputStream();
         byte[] data = new byte[length];
@@ -320,8 +310,14 @@ private byte[] readSocksReply(final int length, final Timeout timeout) throws IO
         try {
             while (received < length) {
                 int count;
-                int remaining = remainingMillis(timeout);
-                setSoTimeout(remaining);
+                timeout.checkedRun(MILLISECONDS, () -> {
+                    setSoTimeout(0);
+                }, (remainingMs) -> {
+                    setSoTimeout(Math.toIntExact(remainingMs));
+                }, () -> {
+                    throwSocketConnectionTimeout();
+                });
+
                 count = inputStream.read(data, received, length - received);
                 if (count < 0) {
                     throw new ConnectException("Malformed reply from SOCKS proxy server");
@@ -334,6 +330,10 @@ private byte[] readSocksReply(final int length, final Timeout timeout) throws IO
         return data;
     }
 
+    private static void throwSocketConnectionTimeout() throws SocketTimeoutException {
+        throw new SocketTimeoutException("Socket connection timed out");
+    }
+
     enum SocksCommand {
 
         CONNECT(0x01);
diff --git a/driver-core/src/main/com/mongodb/internal/connection/Stream.java b/driver-core/src/main/com/mongodb/internal/connection/Stream.java
index b26074d218f..317927f1715 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/Stream.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/Stream.java
@@ -31,45 +31,38 @@ public interface Stream extends BufferProvider {
     /**
      * Open the stream.
      *
+     * @param operationContext the operation context
      * @throws IOException if an I/O error occurs
      */
-    void open() throws IOException;
+    void open(OperationContext operationContext) throws IOException;
 
     /**
      * Open the stream asynchronously.
      *
-     * @param handler the completion handler for opening the stream
+     * @param operationContext the operation context
+     * @param handler          the completion handler for opening the stream
      */
-    void openAsync(AsyncCompletionHandler<Void> handler);
+    void openAsync(OperationContext operationContext, AsyncCompletionHandler<Void> handler);
 
     /**
      * Write each buffer in the list to the stream in order, blocking until all are completely written.
      *
      * @param buffers the buffers to write. The operation must not {@linkplain ByteBuf#release() release} any buffer from {@code buffers},
      * unless the operation {@linkplain ByteBuf#retain() retains} it, and releasing is meant to compensate for that.
+     * @param operationContext the operation context
      * @throws IOException if there are problems writing to the stream
      */
-    void write(List<ByteBuf> buffers) throws IOException;
+    void write(List<ByteBuf> buffers, OperationContext operationContext) throws IOException;
 
     /**
      * Read from the stream, blocking until the requested number of bytes have been read.
      *
      * @param numBytes The number of bytes to read into the returned byte buffer
+     * @param operationContext the operation context
      * @return a byte buffer filled with number of bytes requested
      * @throws IOException if there are problems reading from the stream
      */
-    ByteBuf read(int numBytes) throws IOException;
-
-    /**
-     * Read from the stream, blocking until the requested number of bytes have been read.  If supported by the implementation,
-     * adds the given additional timeout to the configured timeout for the stream.
-     *
-     * @param numBytes The number of bytes to read into the returned byte buffer
-     * @param additionalTimeout additional timeout in milliseconds to add to the configured timeout
-     * @return a byte buffer filled with number of bytes requested
-     * @throws IOException if there are problems reading from the stream
-     */
-    ByteBuf read(int numBytes, int additionalTimeout) throws IOException;
+    ByteBuf read(int numBytes, OperationContext operationContext) throws IOException;
 
     /**
      * Write each buffer in the list to the stream in order, asynchronously.  This method should return immediately, and invoke the given
@@ -77,18 +70,20 @@ public interface Stream extends BufferProvider {
      *
      * @param buffers the buffers to write. The operation must not {@linkplain ByteBuf#release() release} any buffer from {@code buffers},
      * unless the operation {@linkplain ByteBuf#retain() retains} it, and releasing is meant to compensate for that.
+     * @param operationContext the operation context
      * @param handler invoked when the write operation has completed
      */
-    void writeAsync(List<ByteBuf> buffers, AsyncCompletionHandler<Void> handler);
+    void writeAsync(List<ByteBuf> buffers, OperationContext operationContext, AsyncCompletionHandler<Void> handler);
 
     /**
      * Read from the stream, asynchronously.  This method should return immediately, and invoke the given callback when the number of
      * requested bytes have been read.
      *
      * @param numBytes the number of bytes
+     * @param operationContext the operation context
      * @param handler invoked when the read operation has completed
      */
-    void readAsync(int numBytes, AsyncCompletionHandler<ByteBuf> handler);
+    void readAsync(int numBytes, OperationContext operationContext, AsyncCompletionHandler<ByteBuf> handler);
 
     /**
      * The address that this stream is connected to.
diff --git a/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java b/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java
index 8a822d03f6a..436fccb0996 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java
@@ -180,7 +180,7 @@ private static class TlsChannelStream extends AsynchronousChannelStream {
         }
 
         @Override
-        public void openAsync(final AsyncCompletionHandler<Void> handler) {
+        public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler<Void> handler) {
             isTrue("unopened", getChannel() == null);
             try {
                 SocketChannel socketChannel = SocketChannel.open();
diff --git a/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java
index e80909a2c79..de74b6c8d0f 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java
@@ -39,7 +39,7 @@ public UnixSocketChannelStream(final UnixServerAddress address, final SocketSett
     }
 
     @Override
-    protected Socket initializeSocket() throws IOException {
+    protected Socket initializeSocket(final OperationContext operationContext) throws IOException {
         return UnixSocketChannel.open(new UnixSocketAddress(address.getHost())).socket();
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java b/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java
index f0ae4a9244e..d0ec8a6ea51 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java
@@ -16,14 +16,12 @@
 
 package com.mongodb.internal.connection;
 
-import com.mongodb.RequestContext;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.event.ConnectionCreatedEvent;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
-import com.mongodb.internal.session.SessionContext;
 import org.bson.ByteBuf;
 import org.bson.codecs.Decoder;
 
@@ -51,8 +49,8 @@ class UsageTrackingInternalConnection implements InternalConnection {
     }
 
     @Override
-    public void open() {
-        wrapped.open();
+    public void open(final OperationContext operationContext) {
+        wrapped.open(operationContext);
         openedAt = System.currentTimeMillis();
         lastUsedAt = openedAt;
         if (getDescription().getServiceId() != null) {
@@ -61,8 +59,8 @@ public void open() {
     }
 
     @Override
-    public void openAsync(final SingleResultCallback<Void> callback) {
-        wrapped.openAsync((result, t) -> {
+    public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) {
+        wrapped.openAsync(operationContext, (result, t) -> {
             if (t != null) {
                 callback.onResult(null, t);
             } else {
@@ -103,35 +101,27 @@ public ByteBuf getBuffer(final int size) {
     }
 
     @Override
-    public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId) {
-        wrapped.sendMessage(byteBuffers, lastRequestId);
+    public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) {
+        wrapped.sendMessage(byteBuffers, lastRequestId, operationContext);
         lastUsedAt = System.currentTimeMillis();
     }
 
     @Override
-    public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-                                final RequestContext requestContext, final OperationContext operationContext) {
-        T result = wrapped.sendAndReceive(message, decoder, sessionContext, requestContext, operationContext);
+    public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
+        T result = wrapped.sendAndReceive(message, decoder, operationContext);
         lastUsedAt = System.currentTimeMillis();
         return result;
     }
 
     @Override
-    public <T> void send(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext) {
-        wrapped.send(message, decoder, sessionContext);
+    public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
+        wrapped.send(message, decoder, operationContext);
         lastUsedAt = System.currentTimeMillis();
     }
 
     @Override
-    public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext) {
-        T result = wrapped.receive(decoder, sessionContext);
-        lastUsedAt = System.currentTimeMillis();
-        return result;
-    }
-
-    @Override
-    public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext, final int additionalTimeout) {
-        T result = wrapped.receive(decoder, sessionContext, additionalTimeout);
+    public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) {
+        T result = wrapped.receive(decoder, operationContext);
         lastUsedAt = System.currentTimeMillis();
         return result;
     }
@@ -142,39 +132,40 @@ public boolean hasMoreToCome() {
     }
 
     @Override
-    public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder,
-            final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext,
+    public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext,
             final SingleResultCallback<T> callback) {
         SingleResultCallback<T> errHandlingCallback = errorHandlingCallback((result, t) -> {
             lastUsedAt = System.currentTimeMillis();
             callback.onResult(result, t);
         }, LOGGER);
-        wrapped.sendAndReceiveAsync(message, decoder, sessionContext, requestContext, operationContext, errHandlingCallback);
+        wrapped.sendAndReceiveAsync(message, decoder, operationContext, errHandlingCallback);
     }
 
     @Override
-    public ResponseBuffers receiveMessage(final int responseTo) {
-        ResponseBuffers responseBuffers = wrapped.receiveMessage(responseTo);
+    public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) {
+        ResponseBuffers responseBuffers = wrapped.receiveMessage(responseTo, operationContext);
         lastUsedAt = System.currentTimeMillis();
         return responseBuffers;
     }
 
     @Override
-    public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final SingleResultCallback<Void> callback) {
+    public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext,
+            final SingleResultCallback<Void> callback) {
         SingleResultCallback<Void> errHandlingCallback = errorHandlingCallback((result, t) -> {
             lastUsedAt = System.currentTimeMillis();
             callback.onResult(result, t);
         }, LOGGER);
-        wrapped.sendMessageAsync(byteBuffers, lastRequestId, errHandlingCallback);
+        wrapped.sendMessageAsync(byteBuffers, lastRequestId, operationContext, errHandlingCallback);
     }
 
     @Override
-    public void receiveMessageAsync(final int responseTo, final SingleResultCallback<ResponseBuffers> callback) {
+    public void receiveMessageAsync(final int responseTo, final OperationContext operationContext,
+            final SingleResultCallback<ResponseBuffers> callback) {
         SingleResultCallback<ResponseBuffers> errHandlingCallback = errorHandlingCallback((result, t) -> {
             lastUsedAt = System.currentTimeMillis();
             callback.onResult(result, t);
         }, LOGGER);
-        wrapped.receiveMessageAsync(responseTo, errHandlingCallback);
+        wrapped.receiveMessageAsync(responseTo, operationContext, errHandlingCallback);
     }
 
     @Override
diff --git a/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java b/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java
index 257ad8969d7..b5e2dd0512d 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java
@@ -44,13 +44,14 @@ class X509Authenticator extends Authenticator implements SpeculativeAuthenticato
     }
 
     @Override
-    void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription) {
+    void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription,
+            final OperationContext operationContext) {
         if (this.speculativeAuthenticateResponse != null) {
             return;
         }
         try {
             BsonDocument authCommand = getAuthCommand(getMongoCredential().getUserName());
-            executeCommand(getMongoCredential().getSource(), authCommand, getClusterConnectionMode(), getServerApi(), connection);
+            executeCommand(getMongoCredential().getSource(), authCommand, getClusterConnectionMode(), getServerApi(), connection, operationContext);
         } catch (MongoCommandException e) {
             throw new MongoSecurityException(getMongoCredential(), "Exception authenticating", e);
         }
@@ -58,14 +59,14 @@ void authenticate(final InternalConnection connection, final ConnectionDescripti
 
     @Override
     void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription,
-                           final SingleResultCallback<Void> callback) {
+            final OperationContext operationContext, final SingleResultCallback<Void> callback) {
         if (speculativeAuthenticateResponse != null) {
             callback.onResult(null, null);
         } else {
             SingleResultCallback<Void> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
             try {
                 executeCommandAsync(getMongoCredential().getSource(), getAuthCommand(getMongoCredential().getUserName()),
-                        getClusterConnectionMode(), getServerApi(), connection,
+                        getClusterConnectionMode(), getServerApi(), connection, operationContext,
                         (nonceResult, t) -> {
                             if (t != null) {
                                 errHandlingCallback.onResult(null, translateThrowable(t));
diff --git a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java
index 1f3c6ec9a1b..b28054e7d3d 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java
@@ -27,6 +27,7 @@
 import com.mongodb.connection.AsyncCompletionHandler;
 import com.mongodb.connection.SocketSettings;
 import com.mongodb.connection.SslSettings;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.Stream;
 import com.mongodb.lang.Nullable;
 import com.mongodb.spi.dns.InetAddressResolver;
@@ -48,6 +49,7 @@
 import io.netty.handler.ssl.SslContext;
 import io.netty.handler.ssl.SslHandler;
 import io.netty.handler.timeout.ReadTimeoutException;
+import io.netty.handler.timeout.WriteTimeoutHandler;
 import org.bson.ByteBuf;
 
 import javax.net.ssl.SSLContext;
@@ -59,6 +61,7 @@
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Optional;
 import java.util.Queue;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;
@@ -67,7 +70,6 @@
 import java.util.concurrent.locks.ReentrantLock;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
-import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.internal.Locks.withLock;
 import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses;
 import static com.mongodb.internal.connection.SslHelper.enableHostNameVerification;
@@ -80,7 +82,8 @@
  * A Stream implementation based on Netty 4.0.
  * Just like it is for the {@link java.nio.channels.AsynchronousSocketChannel},
  * concurrent pending<sup>1</sup> readers
- * (whether {@linkplain #read(int, int) synchronous} or {@linkplain #readAsync(int, AsyncCompletionHandler) asynchronous})
+ * (whether {@linkplain #read(int, OperationContext) synchronous} or
+ * {@linkplain #readAsync(int, OperationContext, AsyncCompletionHandler) asynchronous})
  * are not supported by {@link NettyStream}.
  * However, this class does not have a fail-fast mechanism checking for such situations.
  * <hr>
@@ -105,7 +108,7 @@
  * int1 -> inv2 -> ret2
  *      \--------> ret1
  * }</pre>
- * As shown on the diagram, the method {@link #readAsync(int, AsyncCompletionHandler)} runs concurrently with
+ * As shown on the diagram, the method {@link #readAsync(int, OperationContext, AsyncCompletionHandler)} runs concurrently with
  * itself in the example above. However, there are no concurrent pending readers because the second operation
  * is invoked after the first operation has completed reading despite the method has not returned yet.
  */
@@ -137,7 +140,6 @@ final class NettyStream implements Stream {
      * these fields can be plain.*/
     @Nullable
     private ReadTimeoutTask readTimeoutTask;
-    private long readTimeoutMillis = NO_SCHEDULE_TIME;
 
     NettyStream(final ServerAddress address, final InetAddressResolver inetAddressResolver, final SocketSettings settings,
             final SslSettings sslSettings, final EventLoopGroup workerGroup,
@@ -159,15 +161,14 @@ public ByteBuf getBuffer(final int size) {
     }
 
     @Override
-    public void open() throws IOException {
+    public void open(final OperationContext operationContext) throws IOException {
         FutureAsyncCompletionHandler<Void> handler = new FutureAsyncCompletionHandler<>();
-        openAsync(handler);
+        openAsync(operationContext, handler);
         handler.get();
     }
 
-    @SuppressWarnings("deprecation")
     @Override
-    public void openAsync(final AsyncCompletionHandler<Void> handler) {
+    public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler<Void> handler) {
         Queue<SocketAddress> socketAddressQueue;
 
         try {
@@ -177,10 +178,11 @@ public void openAsync(final AsyncCompletionHandler<Void> handler) {
             return;
         }
 
-        initializeChannel(handler, socketAddressQueue);
+        initializeChannel(operationContext, handler, socketAddressQueue);
     }
 
-    private void initializeChannel(final AsyncCompletionHandler<Void> handler, final Queue<SocketAddress> socketAddressQueue) {
+    private void initializeChannel(final OperationContext operationContext, final AsyncCompletionHandler<Void> handler,
+            final Queue<SocketAddress> socketAddressQueue) {
         if (socketAddressQueue.isEmpty()) {
             handler.failed(new MongoSocketException("Exception opening socket", getAddress()));
         } else {
@@ -189,8 +191,8 @@ private void initializeChannel(final AsyncCompletionHandler<Void> handler, final
             Bootstrap bootstrap = new Bootstrap();
             bootstrap.group(workerGroup);
             bootstrap.channel(socketChannelClass);
-
-            bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, settings.getConnectTimeout(MILLISECONDS));
+            bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS,
+                    operationContext.getTimeoutContext().getConnectTimeoutMs());
             bootstrap.option(ChannelOption.TCP_NODELAY, true);
             bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
 
@@ -210,46 +212,36 @@ public void initChannel(final SocketChannel ch) {
                         addSslHandler(ch);
                     }
 
-                    int readTimeout = settings.getReadTimeout(MILLISECONDS);
-                    if (readTimeout > NO_SCHEDULE_TIME) {
-                        readTimeoutMillis = readTimeout;
-                        /* We need at least one handler before (in the inbound evaluation order) the InboundBufferHandler,
-                         * so that we can fire exception events (they are inbound events) using its context and the InboundBufferHandler
-                         * receives them. SslHandler is not always present, so adding a NOOP handler.*/
-                        pipeline.addLast(new ChannelInboundHandlerAdapter());
-                        readTimeoutTask = new ReadTimeoutTask(pipeline.lastContext());
-                    }
-
-                    pipeline.addLast(new InboundBufferHandler());
+                    /* We need at least one handler before (in the inbound evaluation order) the InboundBufferHandler,
+                     * so that we can fire exception events (they are inbound events) using its context and the InboundBufferHandler
+                     * receives them. SslHandler is not always present, so adding a NOOP handler.*/
+                    pipeline.addLast("ChannelInboundHandlerAdapter",  new ChannelInboundHandlerAdapter());
+                    readTimeoutTask = new ReadTimeoutTask(pipeline.lastContext());
+                    pipeline.addLast("InboundBufferHandler", new InboundBufferHandler());
                 }
             });
             ChannelFuture channelFuture = bootstrap.connect(nextAddress);
-            channelFuture.addListener(new OpenChannelFutureListener(socketAddressQueue, channelFuture, handler));
+            channelFuture.addListener(new OpenChannelFutureListener(operationContext, socketAddressQueue, channelFuture, handler));
         }
     }
 
     @Override
-    public void write(final List<ByteBuf> buffers) throws IOException {
+    public void write(final List<ByteBuf> buffers, final OperationContext operationContext) throws IOException {
         FutureAsyncCompletionHandler<Void> future = new FutureAsyncCompletionHandler<>();
-        writeAsync(buffers, future);
+        writeAsync(buffers, operationContext, future);
         future.get();
     }
 
     @Override
-    public ByteBuf read(final int numBytes) throws IOException {
-        return read(numBytes, 0);
-    }
-
-    @Override
-    public ByteBuf read(final int numBytes, final int additionalTimeoutMillis) throws IOException {
-        isTrueArgument("additionalTimeoutMillis must not be negative", additionalTimeoutMillis >= 0);
+    public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException {
         FutureAsyncCompletionHandler<ByteBuf> future = new FutureAsyncCompletionHandler<>();
-        readAsync(numBytes, future, combinedTimeout(readTimeoutMillis, additionalTimeoutMillis));
+        readAsync(numBytes, future, operationContext.getTimeoutContext().getReadTimeoutMS());
         return future.get();
     }
 
     @Override
-    public void writeAsync(final List<ByteBuf> buffers, final AsyncCompletionHandler<Void> handler) {
+    public void writeAsync(final List<ByteBuf> buffers, final OperationContext operationContext,
+            final AsyncCompletionHandler<Void> handler) {
         CompositeByteBuf composite = PooledByteBufAllocator.DEFAULT.compositeBuffer();
         for (ByteBuf cur : buffers) {
             // The Netty framework releases `CompositeByteBuf` after writing
@@ -260,7 +252,10 @@ public void writeAsync(final List<ByteBuf> buffers, final AsyncCompletionHandler
             composite.addComponent(true, ((NettyByteBuf) cur).asByteBuf().retain());
         }
 
+        long writeTimeoutMS = operationContext.getTimeoutContext().getWriteTimeoutMS();
+        final Optional<WriteTimeoutHandler> writeTimeoutHandler = addWriteTimeoutHandler(writeTimeoutMS);
         channel.writeAndFlush(composite).addListener((ChannelFutureListener) future -> {
+            writeTimeoutHandler.map(w -> channel.pipeline().remove(w));
             if (!future.isSuccess()) {
                 handler.failed(future.cause());
             } else {
@@ -269,9 +264,18 @@ public void writeAsync(final List<ByteBuf> buffers, final AsyncCompletionHandler
         });
     }
 
+    private Optional<WriteTimeoutHandler> addWriteTimeoutHandler(final long writeTimeoutMS) {
+        if (writeTimeoutMS != NO_SCHEDULE_TIME) {
+            WriteTimeoutHandler writeTimeoutHandler = new WriteTimeoutHandler(writeTimeoutMS, MILLISECONDS);
+            channel.pipeline().addBefore("ChannelInboundHandlerAdapter", "WriteTimeoutHandler", writeTimeoutHandler);
+            return Optional.of(writeTimeoutHandler);
+        }
+        return Optional.empty();
+    }
+
     @Override
-    public void readAsync(final int numBytes, final AsyncCompletionHandler<ByteBuf> handler) {
-        readAsync(numBytes, handler, readTimeoutMillis);
+    public void readAsync(final int numBytes, final OperationContext operationContext, final AsyncCompletionHandler<ByteBuf> handler) {
+        readAsync(numBytes, handler, operationContext.getTimeoutContext().getReadTimeoutMS());
     }
 
     /**
@@ -501,9 +505,12 @@ private class OpenChannelFutureListener implements ChannelFutureListener {
         private final Queue<SocketAddress> socketAddressQueue;
         private final ChannelFuture channelFuture;
         private final AsyncCompletionHandler<Void> handler;
+        private final OperationContext operationContext;
 
-        OpenChannelFutureListener(final Queue<SocketAddress> socketAddressQueue, final ChannelFuture channelFuture,
-                                  final AsyncCompletionHandler<Void> handler) {
+        OpenChannelFutureListener(final OperationContext operationContext,
+                final Queue<SocketAddress> socketAddressQueue, final ChannelFuture channelFuture,
+                final AsyncCompletionHandler<Void> handler) {
+            this.operationContext = operationContext;
             this.socketAddressQueue = socketAddressQueue;
             this.channelFuture = channelFuture;
             this.handler = handler;
@@ -526,7 +533,7 @@ public void operationComplete(final ChannelFuture future) {
                     } else if (socketAddressQueue.isEmpty()) {
                         handler.failed(new MongoSocketOpenException("Exception opening socket", getAddress(), future.cause()));
                     } else {
-                        initializeChannel(handler, socketAddressQueue);
+                        initializeChannel(operationContext, handler, socketAddressQueue);
                     }
                 }
             });
@@ -539,14 +546,6 @@ private static void cancel(@Nullable final Future<?> f) {
         }
     }
 
-    private static long combinedTimeout(final long timeout, final int additionalTimeout) {
-        if (timeout == NO_SCHEDULE_TIME) {
-            return NO_SCHEDULE_TIME;
-        } else {
-            return Math.addExact(timeout, additionalTimeout);
-        }
-    }
-
     @Nullable
     private static ScheduledFuture<?> scheduleReadTimeout(@Nullable final ReadTimeoutTask readTimeoutTask, final long timeoutMillis) {
         if (timeoutMillis == NO_SCHEDULE_TIME) {
@@ -576,9 +575,9 @@ public void run() {
             }
         }
 
+        @Nullable
         private ScheduledFuture<?> schedule(final long timeoutMillis) {
-            //assert timeoutMillis > 0 : timeoutMillis;
-            return ctx.executor().schedule(this, timeoutMillis, MILLISECONDS);
+            return timeoutMillis > 0 ? ctx.executor().schedule(this, timeoutMillis, MILLISECONDS) : null;
         }
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java
index f1c87fabee5..3c845ce6d08 100644
--- a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java
+++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java
@@ -554,6 +554,9 @@ private int handshake(Optional<ByteBufferSet> dest, Optional<HandshakeStatus> ha
     try {
       writeLock.lock();
       try {
+        if (invalid || shutdownSent) {
+            throw new ClosedChannelException();
+        }
         Util.assertTrue(inPlain.nullOrEmpty());
         outEncrypted.prepare();
         try {
diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java b/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java
new file mode 100644
index 00000000000..5c178f8ed33
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.function;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+@FunctionalInterface
+public interface CheckedConsumer<T, E extends Exception> {
+
+    /**
+     * Performs this operation on the given argument.
+     *
+     * @param t the input argument
+     * @throws E the checked exception to throw
+     */
+    void accept(T t) throws E;
+}
diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java b/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java
new file mode 100644
index 00000000000..39b280aa561
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.function;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+@FunctionalInterface
+public interface CheckedFunction<T, R, E extends Exception> {
+
+    /**
+     * Applies the function to the given argument.
+     *
+     * @param t the function argument
+     * @return the function result
+     * @throws E the checked exception to throw
+     */
+    R apply(T t) throws E;
+}
diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java b/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java
new file mode 100644
index 00000000000..f5b24c28a72
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.function;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+@FunctionalInterface
+public interface CheckedRunnable<E extends Exception> {
+
+    /**
+     * Performs this operation, allowing a checked exception to be thrown.
+     *
+     * @throws E the checked exception to throw
+     */
+    void run() throws E;
+}
diff --git a/driver-core/src/main/com/mongodb/internal/CheckedSupplier.java b/driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java
similarity index 95%
rename from driver-core/src/main/com/mongodb/internal/CheckedSupplier.java
rename to driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java
index c75145eb942..ab39e5c824a 100644
--- a/driver-core/src/main/com/mongodb/internal/CheckedSupplier.java
+++ b/driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package com.mongodb.internal;
+package com.mongodb.internal.function;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
diff --git a/driver-core/src/main/com/mongodb/internal/function/package-info.java b/driver-core/src/main/com/mongodb/internal/function/package-info.java
new file mode 100644
index 00000000000..baea9b145ec
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/function/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Internal functional interfaces whose methods may throw checked exceptions.
+ */
+
+@NonNullApi
+package com.mongodb.internal.function;
+
+import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java
index 13166eb53ab..bbd7ce7300e 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java
@@ -18,10 +18,12 @@
 
 import com.mongodb.Function;
 import com.mongodb.WriteConcern;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 
 import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
+import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull;
 
 /**
  * An operation that aborts a transaction.
@@ -47,15 +49,17 @@ protected String getCommandName() {
 
     @Override
     CommandCreator getCommandCreator() {
-        CommandCreator creator = super.getCommandCreator();
-        if (recoveryToken != null) {
-            return (serverDescription, connectionDescription) -> creator.create(serverDescription, connectionDescription).append("recoveryToken", recoveryToken);
-        }
-        return creator;
+        return (operationContext, serverDescription, connectionDescription) -> {
+            operationContext.getTimeoutContext().resetToDefaultMaxTime();
+            BsonDocument command = AbortTransactionOperation.super.getCommandCreator()
+                    .create(operationContext, serverDescription, connectionDescription);
+            putIfNotNull(command, "recoveryToken", recoveryToken);
+            return command;
+        };
     }
 
     @Override
-    protected Function<BsonDocument, BsonDocument> getRetryCommandModifier() {
+    protected Function<BsonDocument, BsonDocument> getRetryCommandModifier(final TimeoutContext timeoutContext) {
         return cmd -> cmd;
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java
index 82da3fc7646..8410a030185 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java
@@ -25,12 +25,12 @@
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 
-import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand;
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync;
-import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer;
-import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync;
 import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection;
+import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync;
+import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand;
 import static com.mongodb.internal.operation.SyncOperationHelper.withConnection;
+import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer;
 
 /**
  * An abstract class for defining operations for managing Atlas Search indexes.
@@ -40,15 +40,17 @@
 abstract class AbstractWriteSearchIndexOperation implements AsyncWriteOperation<Void>, WriteOperation<Void> {
     private final MongoNamespace namespace;
 
-    AbstractWriteSearchIndexOperation(final MongoNamespace mongoNamespace) {
-        this.namespace = mongoNamespace;
+    AbstractWriteSearchIndexOperation(final MongoNamespace namespace) {
+        this.namespace = namespace;
     }
 
     @Override
     public Void execute(final WriteBinding binding) {
         return withConnection(binding, connection -> {
             try {
-                executeCommand(binding, namespace.getDatabaseName(), buildCommand(), connection, writeConcernErrorTransformer());
+                executeCommand(binding, namespace.getDatabaseName(), buildCommand(),
+                        connection,
+                        writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext()));
             } catch (MongoCommandException mongoCommandException) {
                 swallowOrThrow(mongoCommandException);
             }
@@ -61,7 +63,7 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall
         withAsyncSourceAndConnection(binding::getWriteConnectionSource, false, callback,
                 (connectionSource, connection, cb) ->
                         executeCommandAsync(binding, namespace.getDatabaseName(), buildCommand(), connection,
-                                writeConcernErrorTransformerAsync(), (result, commandExecutionError) -> {
+                                writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), (result, commandExecutionError) -> {
                                     try {
                                         swallowOrThrow(commandExecutionError);
                                         callback.onResult(result, null);
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java
index 857c14b857c..07943560b40 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java
@@ -18,20 +18,19 @@
 
 import com.mongodb.ExplainVerbosity;
 import com.mongodb.MongoNamespace;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.internal.client.model.AggregationLevel;
-import com.mongodb.internal.connection.NoOpSessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonValue;
 import org.bson.codecs.Decoder;
 
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand;
 import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION;
@@ -49,7 +48,7 @@ public AggregateOperation(final MongoNamespace namespace, final List<BsonDocumen
     }
 
     public AggregateOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline, final Decoder<T> decoder,
-                              final AggregationLevel aggregationLevel) {
+            final AggregationLevel aggregationLevel) {
         this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, decoder, aggregationLevel);
     }
 
@@ -75,24 +74,6 @@ public AggregateOperation<T> batchSize(@Nullable final Integer batchSize) {
         return this;
     }
 
-    public long getMaxAwaitTime(final TimeUnit timeUnit) {
-        return wrapped.getMaxAwaitTime(timeUnit);
-    }
-
-    public AggregateOperation<T> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        wrapped.maxAwaitTime(maxAwaitTime, timeUnit);
-        return this;
-    }
-
-    public long getMaxTime(final TimeUnit timeUnit) {
-        return wrapped.getMaxTime(timeUnit);
-    }
-
-    public AggregateOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        wrapped.maxTime(maxTime, timeUnit);
-        return this;
-    }
-
     public Collation getCollation() {
         return wrapped.getCollation();
     }
@@ -148,6 +129,11 @@ public AggregateOperation<T> hint(@Nullable final BsonValue hint) {
         return this;
     }
 
+    public AggregateOperation<T> timeoutMode(@Nullable final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
         return wrapped.execute(binding);
@@ -159,24 +145,22 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb
     }
 
     public <R> ReadOperation<R> asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder<R> resultDecoder) {
-        return new CommandReadOperation<>(getNamespace().getDatabaseName(),
-                asExplainCommand(wrapped.getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity),
-                resultDecoder);
+        return createExplainableOperation(verbosity, resultDecoder);
     }
 
     public <R> AsyncReadOperation<R> asAsyncExplainableOperation(@Nullable final ExplainVerbosity verbosity,
                                                                  final Decoder<R> resultDecoder) {
-        return new CommandReadOperation<>(getNamespace().getDatabaseName(),
-                asExplainCommand(wrapped.getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity),
-                resultDecoder);
+        return createExplainableOperation(verbosity, resultDecoder);
     }
 
+    <R> CommandReadOperation<R> createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder<R> resultDecoder) {
+        return new CommandReadOperation<>(getNamespace().getDatabaseName(),
+                (operationContext, serverDescription, connectionDescription) ->
+                        asExplainCommand(wrapped.getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder);
+    }
 
     MongoNamespace getNamespace() {
         return wrapped.getNamespace();
     }
 
-    Decoder<T> getDecoder() {
-        return wrapped.getDecoder();
-    }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java
index ff6b55bac48..7ba2c56b874 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java
@@ -16,27 +16,29 @@
 
 package com.mongodb.internal.operation;
 
+import com.mongodb.CursorType;
 import com.mongodb.MongoNamespace;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.internal.client.model.AggregationLevel;
-import com.mongodb.internal.session.SessionContext;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonArray;
 import org.bson.BsonBoolean;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
-import org.bson.BsonInt64;
 import org.bson.BsonString;
 import org.bson.BsonValue;
 import org.bson.codecs.Decoder;
 
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
@@ -45,6 +47,7 @@
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync;
 import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
 import static com.mongodb.internal.operation.OperationHelper.LOGGER;
+import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier;
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand;
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead;
@@ -54,7 +57,6 @@ class AggregateOperationImpl<T> implements AsyncReadOperation<AsyncBatchCursor<T
     private static final String CURSOR = "cursor";
     private static final String FIRST_BATCH = "firstBatch";
     private static final List<String> FIELD_NAMES_WITH_RESULT = Arrays.asList(RESULT, FIRST_BATCH);
-
     private final MongoNamespace namespace;
     private final List<BsonDocument> pipeline;
     private final Decoder<T> decoder;
@@ -67,18 +69,21 @@ class AggregateOperationImpl<T> implements AsyncReadOperation<AsyncBatchCursor<T
     private Collation collation;
     private BsonValue comment;
     private BsonValue hint;
-    private long maxAwaitTimeMS;
-    private long maxTimeMS;
     private BsonDocument variables;
+    private TimeoutMode timeoutMode;
+    private CursorType cursorType;
 
-    AggregateOperationImpl(final MongoNamespace namespace, final List<BsonDocument> pipeline, final Decoder<T> decoder,
-                           final AggregationLevel aggregationLevel) {
-        this(namespace, pipeline, decoder, defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel),
-                notNull("namespace", namespace).getCollectionName()), defaultPipelineCreator(pipeline));
+    AggregateOperationImpl(final MongoNamespace namespace,
+            final List<BsonDocument> pipeline, final Decoder<T> decoder, final AggregationLevel aggregationLevel) {
+        this(namespace, pipeline, decoder,
+                defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel),
+                        notNull("namespace", namespace).getCollectionName()),
+                defaultPipelineCreator(pipeline));
     }
 
-    AggregateOperationImpl(final MongoNamespace namespace, final List<BsonDocument> pipeline, final Decoder<T> decoder,
-                           final AggregateTarget aggregateTarget, final PipelineCreator pipelineCreator) {
+    AggregateOperationImpl(final MongoNamespace namespace,
+            final List<BsonDocument> pipeline, final Decoder<T> decoder, final AggregateTarget aggregateTarget,
+            final PipelineCreator pipelineCreator) {
         this.namespace = notNull("namespace", namespace);
         this.pipeline = notNull("pipeline", pipeline);
         this.decoder = notNull("decoder", decoder);
@@ -116,30 +121,6 @@ AggregateOperationImpl<T> batchSize(@Nullable final Integer batchSize) {
         return this;
     }
 
-    long getMaxAwaitTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    AggregateOperationImpl<T> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        isTrueArgument("maxAwaitTime >= 0", maxAwaitTime >= 0);
-        this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit);
-        return this;
-    }
-
-    long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    AggregateOperationImpl<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        isTrueArgument("maxTime >= 0", maxTime >= 0);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     Collation getCollation() {
         return collation;
     }
@@ -169,6 +150,19 @@ AggregateOperationImpl<T> retryReads(final boolean retryReads) {
         return this;
     }
 
+    /**
+     * When {@link TimeoutContext#hasTimeoutMS()} is true, {@link TimeoutSettings#getMaxAwaitTimeMS()} usage in {@code getMore}
+     * commands depends on the type of cursor: it is applied for {@link CursorType#TailableAwait} and ignored for all others.
+     * {@link CursorType#TailableAwait} is used mainly for change streams in {@link AggregateOperationImpl}.
+     *
+     * @param cursorType the cursor type that determines whether {@code maxAwaitTimeMS} is applied to {@code getMore} commands
+     * @return this
+     */
+    AggregateOperationImpl<T> cursorType(final CursorType cursorType) {
+        this.cursorType = cursorType;
+        return this;
+    }
+
     boolean getRetryReads() {
         return retryReads;
     }
@@ -178,6 +172,13 @@ BsonValue getHint() {
         return hint;
     }
 
+    public AggregateOperationImpl<T> timeoutMode(@Nullable final TimeoutMode timeoutMode) {
+        if (timeoutMode != null) {
+            this.timeoutMode = timeoutMode;
+        }
+        return this;
+    }
+
     AggregateOperationImpl<T> hint(@Nullable final BsonValue hint) {
         isTrueArgument("BsonString or BsonDocument", hint == null || hint.isDocument() || hint.isString());
         this.hint = hint;
@@ -186,31 +187,30 @@ AggregateOperationImpl<T> hint(@Nullable final BsonValue hint) {
 
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
-        return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
-                CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), transformer(), retryReads);
+        return executeRetryableRead(binding, namespace.getDatabaseName(),
+                getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT),
+                transformer(), retryReads);
     }
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) {
         SingleResultCallback<AsyncBatchCursor<T>> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
-       executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
-               CommandResultDocumentCodec.create(this.decoder, FIELD_NAMES_WITH_RESULT), asyncTransformer(), retryReads,
-               errHandlingCallback);
+        executeRetryableReadAsync(binding, namespace.getDatabaseName(),
+                getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT),
+                asyncTransformer(), retryReads,
+                errHandlingCallback);
     }
 
-    private CommandCreator getCommandCreator(final SessionContext sessionContext) {
-        return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion());
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) ->
+                getCommand(operationContext, connectionDescription.getMaxWireVersion());
     }
 
-    BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) {
+    BsonDocument getCommand(final OperationContext operationContext, final int maxWireVersion) {
         BsonDocument commandDocument = new BsonDocument("aggregate", aggregateTarget.create());
-
-        appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument);
+        appendReadConcernToCommand(operationContext.getSessionContext(), maxWireVersion, commandDocument);
         commandDocument.put("pipeline", pipelineCreator.create());
-        if (maxTimeMS > 0) {
-            commandDocument.put("maxTimeMS", maxTimeMS > Integer.MAX_VALUE
-                    ? new BsonInt64(maxTimeMS) : new BsonInt32((int) maxTimeMS));
-        }
+        setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext);
         BsonDocument cursor = new BsonDocument();
         if (batchSize != null) {
             cursor.put("batchSize", new BsonInt32(batchSize));
@@ -237,14 +237,30 @@ BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVe
 
     private CommandReadTransformer<BsonDocument, CommandBatchCursor<T>> transformer() {
         return (result, source, connection) ->
-                new CommandBatchCursor<>(result, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder,
-                        comment, source, connection);
+                new CommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0,
+                        getMaxTimeForCursor(source.getOperationContext().getTimeoutContext()), decoder, comment, source, connection);
     }
 
     private CommandReadTransformerAsync<BsonDocument, AsyncBatchCursor<T>> asyncTransformer() {
         return (result, source, connection) ->
-                new AsyncCommandBatchCursor<>(result, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder,
-                        comment, source, connection);
+            new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0,
+                    getMaxTimeForCursor(source.getOperationContext().getTimeoutContext()), decoder, comment, source, connection);
+    }
+
+    private TimeoutMode getTimeoutMode() {
+        TimeoutMode localTimeoutMode = timeoutMode;
+        if (localTimeoutMode == null) {
+            localTimeoutMode = TimeoutMode.CURSOR_LIFETIME;
+        }
+        return localTimeoutMode;
+    }
+
+    private long getMaxTimeForCursor(final TimeoutContext timeoutContext) {
+        long maxAwaitTimeMS = timeoutContext.getMaxAwaitTimeMS();
+        if (timeoutContext.hasTimeoutMS()){
+           return CursorType.TailableAwait == cursorType ? maxAwaitTimeMS : 0;
+        }
+        return maxAwaitTimeMS;
     }
 
     interface AggregateTarget {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java
index f41d0e4a462..904f85042ac 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java
@@ -20,6 +20,7 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
@@ -30,13 +31,11 @@
 import org.bson.BsonBoolean;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
-import org.bson.BsonInt64;
 import org.bson.BsonString;
 import org.bson.BsonValue;
 import org.bson.codecs.BsonDocumentCodec;
 
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
@@ -63,35 +62,19 @@ public class AggregateToCollectionOperation implements AsyncReadOperation<Void>,
     private final AggregationLevel aggregationLevel;
 
     private Boolean allowDiskUse;
-    private long maxTimeMS;
     private Boolean bypassDocumentValidation;
     private Collation collation;
     private BsonValue comment;
     private BsonValue hint;
     private BsonDocument variables;
 
-    public AggregateToCollectionOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline) {
-        this(namespace, pipeline, null, null, AggregationLevel.COLLECTION);
-    }
-
-    public AggregateToCollectionOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline,
-                                          final WriteConcern writeConcern) {
-        this(namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION);
-    }
-
-    public AggregateToCollectionOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline,
-                                          final ReadConcern readConcern) {
-        this(namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION);
-    }
-
-    public AggregateToCollectionOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline,
-                                          final ReadConcern readConcern, final WriteConcern writeConcern) {
+    public AggregateToCollectionOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline, final ReadConcern readConcern,
+            final WriteConcern writeConcern) {
         this(namespace, pipeline, readConcern, writeConcern, AggregationLevel.COLLECTION);
     }
 
     public AggregateToCollectionOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline,
-                                          @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern,
-                                          final AggregationLevel aggregationLevel) {
+            @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, final AggregationLevel aggregationLevel) {
         this.namespace = notNull("namespace", namespace);
         this.pipeline = notNull("pipeline", pipeline);
         this.writeConcern = writeConcern;
@@ -122,17 +105,6 @@ public AggregateToCollectionOperation allowDiskUse(@Nullable final Boolean allow
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public AggregateToCollectionOperation maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public Boolean getBypassDocumentValidation() {
         return bypassDocumentValidation;
     }
@@ -174,15 +146,20 @@ public AggregateToCollectionOperation hint(@Nullable final BsonValue hint) {
         return this;
     }
 
+    public AggregateToCollectionOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) {
+        isTrueArgument("timeoutMode cannot be ITERATION.", timeoutMode == null || timeoutMode.equals(TimeoutMode.CURSOR_LIFETIME));
+        return this;
+    }
+
     @Override
     public Void execute(final ReadBinding binding) {
         return executeRetryableRead(binding,
-                () -> binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()),
-                namespace.getDatabaseName(),
-                (serverDescription, connectionDescription) -> getCommand(),
-                new BsonDocumentCodec(), (result, source, connection) -> {
+                                    () -> binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()),
+                                    namespace.getDatabaseName(),
+                                    getCommandCreator(),
+                                    new BsonDocumentCodec(), (result, source, connection) -> {
                     throwOnWriteConcernError(result, connection.getDescription().getServerAddress(),
-                            connection.getDescription().getMaxWireVersion());
+                            connection.getDescription().getMaxWireVersion(), binding.getOperationContext().getTimeoutContext());
                     return null;
                 }, false);
     }
@@ -190,53 +167,51 @@ public Void execute(final ReadBinding binding) {
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<Void> callback) {
         executeRetryableReadAsync(binding,
-                (connectionSourceCallback) -> {
-                        binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary(), connectionSourceCallback);
-                },
-                namespace.getDatabaseName(),
-                (serverDescription, connectionDescription) -> getCommand(),
-                new BsonDocumentCodec(), (result, source, connection) -> {
+                                  (connectionSourceCallback) ->
+                        binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary(), connectionSourceCallback),
+                                  namespace.getDatabaseName(),
+                                  getCommandCreator(),
+                                  new BsonDocumentCodec(), (result, source, connection) -> {
                     throwOnWriteConcernError(result, connection.getDescription().getServerAddress(),
-                            connection.getDescription().getMaxWireVersion());
+                            connection.getDescription().getMaxWireVersion(), binding.getOperationContext().getTimeoutContext());
                     return null;
                 }, false, callback);
     }
 
-    private BsonDocument getCommand() {
-        BsonValue aggregationTarget = (aggregationLevel == AggregationLevel.DATABASE)
-                ? new BsonInt32(1) : new BsonString(namespace.getCollectionName());
-
-        BsonDocument commandDocument = new BsonDocument("aggregate", aggregationTarget);
-        commandDocument.put("pipeline", new BsonArray(pipeline));
-        if (maxTimeMS > 0) {
-            commandDocument.put("maxTimeMS", new BsonInt64(maxTimeMS));
-        }
-        if (allowDiskUse != null) {
-            commandDocument.put("allowDiskUse", BsonBoolean.valueOf(allowDiskUse));
-        }
-        if (bypassDocumentValidation != null) {
-            commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation));
-        }
-
-        commandDocument.put("cursor", new BsonDocument());
-
-        appendWriteConcernToCommand(writeConcern, commandDocument);
-        if (readConcern != null && !readConcern.isServerDefault()) {
-            commandDocument.put("readConcern", readConcern.asDocument());
-        }
-
-        if (collation != null) {
-            commandDocument.put("collation", collation.asDocument());
-        }
-        if (comment != null) {
-            commandDocument.put("comment", comment);
-        }
-        if (hint != null) {
-            commandDocument.put("hint", hint);
-        }
-        if (variables != null) {
-            commandDocument.put("let", variables);
-        }
-        return commandDocument;
+    private CommandOperationHelper.CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonValue aggregationTarget = (aggregationLevel == AggregationLevel.DATABASE)
+                    ? new BsonInt32(1) : new BsonString(namespace.getCollectionName());
+
+            BsonDocument commandDocument = new BsonDocument("aggregate", aggregationTarget);
+            commandDocument.put("pipeline", new BsonArray(pipeline));
+            if (allowDiskUse != null) {
+                commandDocument.put("allowDiskUse", BsonBoolean.valueOf(allowDiskUse));
+            }
+            if (bypassDocumentValidation != null) {
+                commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation));
+            }
+
+            commandDocument.put("cursor", new BsonDocument());
+
+            appendWriteConcernToCommand(writeConcern, commandDocument);
+            if (readConcern != null && !readConcern.isServerDefault()) {
+                commandDocument.put("readConcern", readConcern.asDocument());
+            }
+
+            if (collation != null) {
+                commandDocument.put("collation", collation.asDocument());
+            }
+            if (comment != null) {
+                commandDocument.put("comment", comment);
+            }
+            if (hint != null) {
+                commandDocument.put("hint", hint);
+            }
+            if (variables != null) {
+                commandDocument.put("let", variables);
+            }
+            return commandDocument;
+        };
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java
index 7e55f05cac5..a4cfbafedb6 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java
@@ -17,6 +17,7 @@
 package com.mongodb.internal.operation;
 
 import com.mongodb.MongoException;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
@@ -42,6 +43,7 @@
 
 final class AsyncChangeStreamBatchCursor<T> implements AsyncAggregateResponseBatchCursor<T> {
     private final AsyncReadBinding binding;
+    private final TimeoutContext timeoutContext;
     private final ChangeStreamOperation<T> changeStreamOperation;
     private final int maxWireVersion;
 
@@ -63,6 +65,7 @@ final class AsyncChangeStreamBatchCursor<T> implements AsyncAggregateResponseBat
         this.wrapped = new AtomicReference<>(assertNotNull(wrapped));
         this.binding = binding;
         binding.retain();
+        this.timeoutContext = binding.getOperationContext().getTimeoutContext();
         this.resumeToken = resumeToken;
         this.maxWireVersion = maxWireVersion;
         isClosed = new AtomicBoolean();
@@ -80,6 +83,7 @@ public void next(final SingleResultCallback<List<T>> callback) {
 
     @Override
     public void close() {
+        timeoutContext.resetTimeoutIfPresent();
         if (isClosed.compareAndSet(false, true)) {
             try {
                 nullifyAndCloseWrapped();
@@ -177,6 +181,7 @@ private interface AsyncBlock {
     }
 
     private void resumeableOperation(final AsyncBlock asyncBlock, final SingleResultCallback<List<T>> callback, final boolean tryNext) {
+        timeoutContext.resetTimeoutIfPresent();
         SingleResultCallback<List<T>> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
         if (isClosed()) {
             errHandlingCallback.onResult(null, new MongoException(format("%s called after the cursor was closed.",
@@ -219,12 +224,12 @@ private void retryOperation(final AsyncBlock asyncBlock, final SingleResultCallb
                 changeStreamOperation.setChangeStreamOptionsForResume(resumeToken,
                         assertNotNull(source).getServerDescription().getMaxWireVersion());
                 source.release();
-                changeStreamOperation.executeAsync(binding, (result, t1) -> {
+                changeStreamOperation.executeAsync(binding, (asyncBatchCursor, t1) -> {
                     if (t1 != null) {
                         callback.onResult(null, t1);
                     } else {
                         try {
-                            setWrappedOrCloseIt(assertNotNull((AsyncChangeStreamBatchCursor<T>) result).getWrapped());
+                            setWrappedOrCloseIt(assertNotNull((AsyncChangeStreamBatchCursor<T>) asyncBatchCursor).getWrapped());
                         } finally {
                             try {
                                 binding.release(); // release the new change stream batch cursor's reference to the binding
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java
index 4831650f7ff..eec8721fbf1 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java
@@ -18,13 +18,16 @@
 
 import com.mongodb.MongoCommandException;
 import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoSocketException;
 import com.mongodb.ReadPreference;
 import com.mongodb.ServerAddress;
 import com.mongodb.ServerCursor;
 import com.mongodb.annotations.ThreadSafe;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ServerType;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
@@ -32,6 +35,7 @@
 import com.mongodb.internal.binding.AsyncConnectionSource;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.operation.AsyncOperationHelper.AsyncCallableConnectionWithCallback;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
@@ -71,6 +75,7 @@ class AsyncCommandBatchCursor<T> implements AsyncAggregateResponseBatchCursor<T>
     private volatile CommandCursorResult<T> commandCursorResult;
 
     AsyncCommandBatchCursor(
+            final TimeoutMode timeoutMode,
             final BsonDocument commandCursorDocument,
             final int batchSize, final long maxTimeMS,
             final Decoder<T> decoder,
@@ -87,14 +92,18 @@ class AsyncCommandBatchCursor<T> implements AsyncAggregateResponseBatchCursor<T>
         this.maxWireVersion = connectionDescription.getMaxWireVersion();
         this.firstBatchEmpty = commandCursorResult.getResults().isEmpty();
 
+        connectionSource.getOperationContext().getTimeoutContext().setMaxTimeOverride(maxTimeMS);
+
         AsyncConnection connectionToPin = connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER
                 ? connection : null;
-        resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor());
+        resourceManager = new ResourceManager(timeoutMode, namespace, connectionSource, connectionToPin,
+                commandCursorResult.getServerCursor());
     }
 
     @Override
     public void next(final SingleResultCallback<List<T>> callback) {
         resourceManager.execute(funcCallback -> {
+            resourceManager.checkTimeoutModeAndResetTimeoutContextIfIteration();
             ServerCursor localServerCursor = resourceManager.getServerCursor();
             boolean serverCursorIsNull = localServerCursor == null;
             List<T> batchResults = emptyList();
@@ -167,10 +176,10 @@ private void getMore(final ServerCursor cursor, final SingleResultCallback<List<
     private void getMoreLoop(final AsyncConnection connection, final ServerCursor serverCursor,
             final SingleResultCallback<List<T>> callback) {
         connection.commandAsync(namespace.getDatabaseName(),
-                getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, maxTimeMS, comment),
+                getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment),
                 NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(),
                 CommandResultDocumentCodec.create(decoder, NEXT_BATCH),
-                assertNotNull(resourceManager.getConnectionSource()),
+                assertNotNull(resourceManager.getConnectionSource()).getOperationContext(),
                 (commandResult, t) -> {
                     if (t != null) {
                         Throwable translatedException =
@@ -207,15 +216,21 @@ private CommandCursorResult<T> toCommandCursorResult(final ServerAddress serverA
         return commandCursorResult;
     }
 
+    void setCloseWithoutTimeoutReset(final boolean closeWithoutTimeoutReset) {
+        this.resourceManager.setCloseWithoutTimeoutReset(closeWithoutTimeoutReset);
+    }
+
     @ThreadSafe
     private static final class ResourceManager extends CursorResourceManager<AsyncConnectionSource, AsyncConnection> {
 
         ResourceManager(
+                final TimeoutMode timeoutMode,
                 final MongoNamespace namespace,
                 final AsyncConnectionSource connectionSource,
                 @Nullable final AsyncConnection connectionToPin,
                 @Nullable final ServerCursor serverCursor) {
-            super(namespace, connectionSource, connectionToPin, serverCursor);
+            super(connectionSource.getOperationContext().getTimeoutContext(), timeoutMode, namespace, connectionSource, connectionToPin,
+                    serverCursor);
         }
 
         /**
@@ -250,6 +265,7 @@ void doClose() {
                 unsetServerCursor();
             }
 
+            resetTimeout();
             if (getServerCursor() != null) {
                 getConnection((connection, t) -> {
                     if (connection != null) {
@@ -271,8 +287,8 @@ <R> void executeWithConnection(final AsyncCallableConnectionWithCallback<R> call
                     return;
                 }
                 callable.call(assertNotNull(connection), (result, t1) -> {
-                    if (t1 instanceof MongoSocketException) {
-                        onCorruptedConnection(connection, (MongoSocketException) t1);
+                    if (t1 != null) {
+                        handleException(connection, t1);
                     }
                     connection.release();
                     callback.onResult(result, t1);
@@ -280,6 +296,14 @@ <R> void executeWithConnection(final AsyncCallableConnectionWithCallback<R> call
             });
         }
 
+        private void handleException(final AsyncConnection connection, final Throwable exception) {
+            if (exception instanceof MongoOperationTimeoutException && exception.getCause() instanceof MongoSocketException) {
+                onCorruptedConnection(connection, (MongoSocketException) exception.getCause());
+            } else if (exception instanceof MongoSocketException) {
+                onCorruptedConnection(connection, (MongoSocketException) exception);
+            }
+        }
+
         private void getConnection(final SingleResultCallback<AsyncConnection> callback) {
             assertTrue(getState() != State.IDLE);
             AsyncConnection pinnedConnection = getPinnedConnection();
@@ -305,9 +329,13 @@ private void releaseServerAndClientResources(final AsyncConnection connection) {
 
         private void killServerCursor(final MongoNamespace namespace, final ServerCursor localServerCursor,
                 final AsyncConnection localConnection, final SingleResultCallback<Void> callback) {
+            OperationContext operationContext = assertNotNull(getConnectionSource()).getOperationContext();
+            TimeoutContext timeoutContext = operationContext.getTimeoutContext();
+            timeoutContext.resetToDefaultMaxTime();
+
             localConnection.commandAsync(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor),
                     NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(),
-                    assertNotNull(getConnectionSource()), (r, t) -> callback.onResult(null, null));
+                    operationContext, (r, t) -> callback.onResult(null, null));
         }
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java
index b56f624bef5..35782219545 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java
@@ -20,6 +20,8 @@
 import com.mongodb.MongoException;
 import com.mongodb.ReadPreference;
 import com.mongodb.assertions.Assertions;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.async.function.AsyncCallbackBiFunction;
@@ -132,8 +134,12 @@ static <R, T extends ReferenceCounted> void withAsyncSuppliedResource(final Asyn
                 errorHandlingCallback.onResult(null, supplierException);
             } else {
                 Assertions.assertNotNull(resource);
-                AsyncCallbackSupplier<R> curriedFunction = c -> function.apply(resource, c);
-                curriedFunction.whenComplete(resource::release).get(errorHandlingCallback);
+                try {
+                    AsyncCallbackSupplier<R> curriedFunction = c -> function.apply(resource, c);
+                    curriedFunction.whenComplete(resource::release).get(errorHandlingCallback);
+                } catch (Exception e) {
+                    errorHandlingCallback.onResult(null, e);
+                }
             }
         });
     }
@@ -162,8 +168,8 @@ static <D, T> void executeRetryableReadAsync(
             final CommandReadTransformerAsync<D, T> transformer,
             final boolean retryReads,
             final SingleResultCallback<T> callback) {
-        executeRetryableReadAsync(binding, binding::getReadConnectionSource, database, commandCreator, decoder, transformer, retryReads,
-                callback);
+        executeRetryableReadAsync(binding, binding::getReadConnectionSource, database, commandCreator,
+                                  decoder, transformer, retryReads, callback);
     }
 
     static <D, T> void executeRetryableReadAsync(
@@ -175,28 +181,41 @@ static <D, T> void executeRetryableReadAsync(
             final CommandReadTransformerAsync<D, T> transformer,
             final boolean retryReads,
             final SingleResultCallback<T> callback) {
-        RetryState retryState = initialRetryState(retryReads);
+        RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext());
         binding.retain();
+        OperationContext operationContext = binding.getOperationContext();
         AsyncCallbackSupplier<T> asyncRead = decorateReadWithRetriesAsync(retryState, binding.getOperationContext(),
                 (AsyncCallbackSupplier<T>) funcCallback ->
                         withAsyncSourceAndConnection(sourceAsyncSupplier, false, funcCallback,
                                 (source, connection, releasingCallback) -> {
                                     if (retryState.breakAndCompleteIfRetryAnd(
                                             () -> !OperationHelper.canRetryRead(source.getServerDescription(),
-                                                    binding.getSessionContext()),
+                                                    operationContext),
                                             releasingCallback)) {
                                         return;
                                     }
-                                    createReadCommandAndExecuteAsync(retryState, binding, source,
-                                            database, commandCreator,
-                                            decoder, transformer,
-                                            connection,
-                                            releasingCallback);
+                                    createReadCommandAndExecuteAsync(retryState, operationContext, source, database,
+                                                                     commandCreator, decoder, transformer, connection, releasingCallback);
                                 })
         ).whenComplete(binding::release);
         asyncRead.get(errorHandlingCallback(callback, OperationHelper.LOGGER));
     }
 
+    static <T> void executeCommandAsync(
+            final AsyncWriteBinding binding,
+            final String database,
+            final CommandCreator commandCreator,
+            final CommandWriteTransformerAsync<BsonDocument, T> transformer,
+            final SingleResultCallback<T> callback) {
+        Assertions.notNull("binding", binding);
+        withAsyncSourceAndConnection(binding::getWriteConnectionSource, false, callback,
+                (source, connection, releasingCallback) ->
+                        executeCommandAsync(binding, database, commandCreator.create(
+                                binding.getOperationContext(), source.getServerDescription(), connection.getDescription()),
+                                connection, transformer, releasingCallback)
+        );
+    }
+
     static <T> void executeCommandAsync(final AsyncWriteBinding binding,
             final String database,
             final BsonDocument command,
@@ -207,7 +226,7 @@ static <T> void executeCommandAsync(final AsyncWriteBinding binding,
         SingleResultCallback<T> addingRetryableLabelCallback = addingRetryableLabelCallback(callback,
                 connection.getDescription().getMaxWireVersion());
         connection.commandAsync(database, command, new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(),
-                binding, transformingWriteCallback(transformer, connection, addingRetryableLabelCallback));
+                binding.getOperationContext(), transformingWriteCallback(transformer, connection, addingRetryableLabelCallback));
     }
 
     static <T, R> void executeRetryableWriteAsync(
@@ -220,14 +239,16 @@ static <T, R> void executeRetryableWriteAsync(
             final CommandWriteTransformerAsync<T, R> transformer,
             final Function<BsonDocument, BsonDocument> retryCommandModifier,
             final SingleResultCallback<R> callback) {
-        RetryState retryState = initialRetryState(true);
+
+        RetryState retryState = initialRetryState(true, binding.getOperationContext().getTimeoutContext());
         binding.retain();
+        OperationContext operationContext = binding.getOperationContext();
 
-        AsyncCallbackSupplier<R> asyncWrite = decorateWriteWithRetriesAsync(retryState, binding.getOperationContext(),
+        AsyncCallbackSupplier<R> asyncWrite = decorateWriteWithRetriesAsync(retryState, operationContext,
                 (AsyncCallbackSupplier<R>) funcCallback -> {
             boolean firstAttempt = retryState.isFirstAttempt();
-            if (!firstAttempt && binding.getSessionContext().hasActiveTransaction()) {
-                binding.getSessionContext().clearTransactionContext();
+            if (!firstAttempt && operationContext.getSessionContext().hasActiveTransaction()) {
+                operationContext.getSessionContext().clearTransactionContext();
             }
             withAsyncSourceAndConnection(binding::getWriteConnectionSource, true, funcCallback,
                     (source, connection, releasingCallback) -> {
@@ -235,7 +256,8 @@ static <T, R> void executeRetryableWriteAsync(
                         SingleResultCallback<R> addingRetryableLabelCallback = firstAttempt
                                 ? releasingCallback
                                 : addingRetryableLabelCallback(releasingCallback, maxWireVersion);
-                        if (retryState.breakAndCompleteIfRetryAnd(() -> !OperationHelper.canRetryWrite(connection.getDescription(), binding.getSessionContext()),
+                        if (retryState.breakAndCompleteIfRetryAnd(() ->
+                                        !OperationHelper.canRetryWrite(connection.getDescription(), operationContext.getSessionContext()),
                                 addingRetryableLabelCallback)) {
                             return;
                         }
@@ -245,7 +267,10 @@ static <T, R> void executeRetryableWriteAsync(
                                     .map(previousAttemptCommand -> {
                                         Assertions.assertFalse(firstAttempt);
                                         return retryCommandModifier.apply(previousAttemptCommand);
-                                    }).orElseGet(() -> commandCreator.create(source.getServerDescription(), connection.getDescription()));
+                                    }).orElseGet(() -> commandCreator.create(
+                                            operationContext,
+                                            source.getServerDescription(),
+                                            connection.getDescription()));
                             // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry
                             retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true)
                                     .attach(AttachmentKeys.retryableCommandFlag(), isRetryWritesEnabled(command), true)
@@ -255,8 +280,8 @@ static <T, R> void executeRetryableWriteAsync(
                             addingRetryableLabelCallback.onResult(null, t);
                             return;
                         }
-                        connection.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, binding,
-                                transformingWriteCallback(transformer, connection, addingRetryableLabelCallback));
+                        connection.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder,
+                                operationContext, transformingWriteCallback(transformer, connection, addingRetryableLabelCallback));
                     });
         }).whenComplete(binding::release);
 
@@ -265,7 +290,7 @@ static <T, R> void executeRetryableWriteAsync(
 
     static <D, T> void createReadCommandAndExecuteAsync(
             final RetryState retryState,
-            final AsyncReadBinding binding,
+            final OperationContext operationContext,
             final AsyncConnectionSource source,
             final String database,
             final CommandCreator commandCreator,
@@ -275,14 +300,14 @@ static <D, T> void createReadCommandAndExecuteAsync(
             final SingleResultCallback<T> callback) {
         BsonDocument command;
         try {
-            command = commandCreator.create(source.getServerDescription(), connection.getDescription());
+            command = commandCreator.create(operationContext, source.getServerDescription(), connection.getDescription());
             retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false);
         } catch (IllegalArgumentException e) {
             callback.onResult(null, e);
             return;
         }
         connection.commandAsync(database, command, new NoOpFieldNameValidator(), source.getReadPreference(), decoder,
-                binding, transformingReadCallback(transformer, source, connection, callback));
+                operationContext, transformingReadCallback(transformer, source, connection, callback));
     }
 
     static <R> AsyncCallbackSupplier<R> decorateReadWithRetriesAsync(final RetryState retryState, final OperationContext operationContext,
@@ -303,10 +328,12 @@ static <R> AsyncCallbackSupplier<R> decorateWriteWithRetriesAsync(final RetrySta
         });
     }
 
-    static CommandWriteTransformerAsync<BsonDocument, Void> writeConcernErrorTransformerAsync() {
+    static CommandWriteTransformerAsync<BsonDocument, Void> writeConcernErrorTransformerAsync(final TimeoutContext timeoutContext) {
         return (result, connection) -> {
             assertNotNull(result);
-            throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), connection.getDescription().getMaxWireVersion());
+            throwOnWriteConcernError(result, connection.getDescription().getServerAddress(),
+                    connection.getDescription().getMaxWireVersion(),
+                    timeoutContext);
             return null;
         };
     }
@@ -316,9 +343,10 @@ static <T> CommandReadTransformerAsync<BsonDocument, AsyncBatchCursor<T>> asyncS
                 new AsyncSingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, fieldName), 0);
     }
 
-    static <T> AsyncBatchCursor<T> cursorDocumentToAsyncBatchCursor(final BsonDocument cursorDocument, final Decoder<T> decoder,
-            final BsonValue comment, final AsyncConnectionSource source, final AsyncConnection connection, final int batchSize) {
-        return new AsyncCommandBatchCursor<>(cursorDocument, batchSize, 0, decoder, comment, source, connection);
+    static <T> AsyncBatchCursor<T> cursorDocumentToAsyncBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument,
+            final int batchSize, final Decoder<T> decoder, final BsonValue comment, final AsyncConnectionSource source,
+            final AsyncConnection connection) {
+        return new AsyncCommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection);
     }
 
     static <T> SingleResultCallback<T> releasingCallback(final SingleResultCallback<T> wrapped, final AsyncConnection connection) {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java
index c266c135529..77434bd9781 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java
@@ -22,6 +22,7 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
 import com.mongodb.bulk.BulkWriteResult;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.BulkWriteOptions;
 import com.mongodb.client.model.Collation;
 import com.mongodb.client.model.CountOptions;
@@ -45,6 +46,7 @@
 import com.mongodb.client.model.WriteModel;
 import com.mongodb.client.model.changestream.FullDocument;
 import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.client.model.AggregationLevel;
 import com.mongodb.internal.client.model.FindOptions;
@@ -60,18 +62,25 @@
 import java.util.List;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public final class AsyncOperations<TDocument> {
     private final Operations<TDocument> operations;
+    private final TimeoutSettings timeoutSettings;
 
     public AsyncOperations(final MongoNamespace namespace, final Class<TDocument> documentClass, final ReadPreference readPreference,
             final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern,
-            final boolean retryWrites, final boolean retryReads) {
-        this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern,
+            final boolean retryWrites, final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        WriteConcern writeConcernToUse = writeConcern;
+        if (timeoutSettings.getTimeoutMS() != null) {
+            writeConcernToUse = assertNotNull(WriteConcernHelper.cloneWithoutTimeout(writeConcern));
+        }
+        this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcernToUse,
                 retryWrites, retryReads);
+        this.timeoutSettings = timeoutSettings;
     }
 
     public MongoNamespace getNamespace() {
@@ -98,6 +107,10 @@ public WriteConcern getWriteConcern() {
         return operations.getWriteConcern();
     }
 
+    public TimeoutSettings getTimeoutSettings() {
+        return timeoutSettings;
+    }
+
     public boolean isRetryWrites() {
         return operations.isRetryWrites();
     }
@@ -106,6 +119,44 @@ public boolean isRetryReads() {
         return operations.isRetryReads();
     }
 
+    public TimeoutSettings createTimeoutSettings(final long maxTimeMS) {
+        return timeoutSettings.withMaxTimeMS(maxTimeMS);
+    }
+
+    public TimeoutSettings createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) {
+        return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS);
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final CountOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final EstimatedDocumentCountOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOptions options) {
+        return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOneAndDeleteOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOneAndReplaceOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOneAndUpdateOptions options) {
+        return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS));
+    }
+
     public AsyncReadOperation<Long> countDocuments(final Bson filter, final CountOptions options) {
         return operations.countDocuments(filter, options);
     }
@@ -130,52 +181,52 @@ public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> find(final MongoN
     }
 
     public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> distinct(final String fieldName, final Bson filter,
-            final Class<TResult> resultClass, final long maxTimeMS,
-            final Collation collation, final BsonValue comment) {
-        return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment);
+            final Class<TResult> resultClass, final Collation collation, final BsonValue comment) {
+        return operations.distinct(fieldName, filter, resultClass, collation, comment);
     }
 
-    public <TResult> AsyncExplainableReadOperation<AsyncBatchCursor<TResult>> aggregate(final List<? extends Bson> pipeline,
+    public <TResult> AsyncExplainableReadOperation<AsyncBatchCursor<TResult>> aggregate(
+            final List<? extends Bson> pipeline,
             final Class<TResult> resultClass,
-            final long maxTimeMS, final long maxAwaitTimeMS,
-            final Integer batchSize,
+            @Nullable final TimeoutMode timeoutMode,
+            @Nullable final Integer batchSize,
             final Collation collation, final Bson hint,
             final String hintString,
             final BsonValue comment,
             final Bson variables,
             final Boolean allowDiskUse,
             final AggregationLevel aggregationLevel) {
-        return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, batchSize, collation, hint, hintString, comment,
-                variables, allowDiskUse, aggregationLevel);
+        return operations.aggregate(pipeline, resultClass, timeoutMode, batchSize, collation, hint, hintString,
+                comment, variables, allowDiskUse, aggregationLevel);
     }
 
-    public AsyncReadOperation<Void> aggregateToCollection(final List<? extends Bson> pipeline, final long maxTimeMS,
-            final Boolean allowDiskUse, final Boolean bypassDocumentValidation,
+    public AsyncReadOperation<Void> aggregateToCollection(final List<? extends Bson> pipeline,
+            @Nullable final TimeoutMode timeoutMode, final Boolean allowDiskUse, final Boolean bypassDocumentValidation,
             final Collation collation, final Bson hint, final String hintString, final BsonValue comment,
             final Bson variables, final AggregationLevel aggregationLevel) {
-        return operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, hintString,
-                comment, variables, aggregationLevel);
+        return operations.aggregateToCollection(pipeline, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint,
+                hintString, comment, variables, aggregationLevel);
     }
 
     @SuppressWarnings("deprecation")
     public AsyncWriteOperation<MapReduceStatistics> mapReduceToCollection(final String databaseName, final String collectionName,
             final String mapFunction, final String reduceFunction,
             final String finalizeFunction, final Bson filter, final int limit,
-            final long maxTimeMS, final boolean jsMode, final Bson scope,
+             final boolean jsMode, final Bson scope,
             final Bson sort, final boolean verbose,
             final com.mongodb.client.model.MapReduceAction action,
             final Boolean bypassDocumentValidation, final Collation collation) {
         return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, limit,
-                maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation);
+                jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation);
     }
 
     public <TResult> AsyncReadOperation<MapReduceAsyncBatchCursor<TResult>> mapReduce(final String mapFunction, final String reduceFunction,
             final String finalizeFunction, final Class<TResult> resultClass,
             final Bson filter, final int limit,
-            final long maxTimeMS, final boolean jsMode, final Bson scope,
+             final boolean jsMode, final Bson scope,
             final Bson sort, final boolean verbose,
             final Collation collation) {
-        return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, maxTimeMS, jsMode, scope,
+        return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, jsMode, scope,
                 sort, verbose, collation);
     }
 
@@ -288,14 +339,9 @@ public AsyncWriteOperation<Void> dropSearchIndex(final String indexName) {
     }
 
     public <TResult> AsyncExplainableReadOperation<AsyncBatchCursor<TResult>> listSearchIndexes(final Class<TResult> resultClass,
-                                                                                      final long maxTimeMS,
-                                                                                      @Nullable final String indexName,
-                                                                                      @Nullable final Integer batchSize,
-                                                                                      @Nullable final Collation collation,
-                                                                                      @Nullable final BsonValue comment,
-                                                                                      @Nullable final Boolean allowDiskUse) {
-        return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, batchSize, collation,
-                comment, allowDiskUse);
+            @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation,
+            @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) {
+        return operations.listSearchIndexes(resultClass, indexName, batchSize, collation, comment, allowDiskUse);
     }
 
     public AsyncWriteOperation<Void> dropIndex(final String indexName, final DropIndexOptions options) {
@@ -306,31 +352,29 @@ public AsyncWriteOperation<Void> dropIndex(final Bson keys, final DropIndexOptio
         return operations.dropIndex(keys, options);
     }
 
-    public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> listCollections(final String databaseName, final Class<TResult> resultClass,
-            final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections,
-            final Integer batchSize, final long maxTimeMS,
-            final BsonValue comment) {
+    public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> listCollections(final String databaseName,
+            final Class<TResult> resultClass, final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections,
+            @Nullable final Integer batchSize,  final BsonValue comment,  @Nullable final TimeoutMode timeoutMode) {
         return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections,
-                batchSize, maxTimeMS, comment);
+                batchSize, comment, timeoutMode);
     }
 
     public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> listDatabases(final Class<TResult> resultClass, final Bson filter,
-            final Boolean nameOnly, final long maxTimeMS,
-            final Boolean authorizedDatabases, final BsonValue comment) {
-        return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabases, comment);
+            final Boolean nameOnly,  final Boolean authorizedDatabases, final BsonValue comment) {
+        return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabases, comment);
     }
 
-    public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> listIndexes(final Class<TResult> resultClass, final Integer batchSize,
-            final long maxTimeMS, final BsonValue comment) {
-        return operations.listIndexes(resultClass, batchSize, maxTimeMS, comment);
+    public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> listIndexes(final Class<TResult> resultClass,
+            @Nullable final Integer batchSize,  final BsonValue comment, @Nullable final TimeoutMode timeoutMode) {
+        return operations.listIndexes(resultClass, batchSize, comment, timeoutMode);
     }
 
     public <TResult> AsyncReadOperation<AsyncBatchCursor<TResult>> changeStream(final FullDocument fullDocument,
             final FullDocumentBeforeChange fullDocumentBeforeChange, final List<? extends Bson> pipeline,
             final Decoder<TResult> decoder, final ChangeStreamLevel changeStreamLevel, final Integer batchSize, final Collation collation,
-            final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime,
+            final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime,
             final BsonDocument startAfter, final boolean showExpandedEvents) {
         return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, decoder, changeStreamLevel, batchSize,
-                collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
+                collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java
index 5179d3096b3..e523ee3f389 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java
@@ -32,17 +32,13 @@
 import org.bson.FieldNameValidator;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
-
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableWriteAsync;
 import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
 import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull;
-import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero;
 import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite;
 import static com.mongodb.internal.operation.OperationHelper.validateHintForFindAndModify;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableWrite;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * Abstract base class for findAndModify-based operations
@@ -50,7 +46,6 @@
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public abstract class BaseFindAndModifyOperation<T> implements AsyncWriteOperation<T>, WriteOperation<T> {
-
     private final MongoNamespace namespace;
     private final WriteConcern writeConcern;
     private final boolean retryWrites;
@@ -59,15 +54,14 @@ public abstract class BaseFindAndModifyOperation<T> implements AsyncWriteOperati
     private BsonDocument filter;
     private BsonDocument projection;
     private BsonDocument sort;
-    private long maxTimeMS;
     private Collation collation;
     private BsonDocument hint;
     private String hintString;
     private BsonValue comment;
     private BsonDocument variables;
 
-    protected BaseFindAndModifyOperation(final MongoNamespace namespace, final WriteConcern writeConcern,
-                                         final boolean retryWrites, final Decoder<T> decoder) {
+    protected BaseFindAndModifyOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites,
+            final Decoder<T> decoder) {
         this.namespace = notNull("namespace", namespace);
         this.writeConcern = notNull("writeConcern", writeConcern);
         this.retryWrites = retryWrites;
@@ -77,17 +71,18 @@ protected BaseFindAndModifyOperation(final MongoNamespace namespace, final Write
     @Override
     public T execute(final WriteBinding binding) {
         return executeRetryableWrite(binding, getDatabaseName(), null, getFieldNameValidator(),
-                CommandResultDocumentCodec.create(getDecoder(), "value"),
-                getCommandCreator(binding.getSessionContext()),
-                FindAndModifyHelper.transformer(),
-                cmd -> cmd);
+                                     CommandResultDocumentCodec.create(getDecoder(), "value"),
+                                     getCommandCreator(),
+                                     FindAndModifyHelper.transformer(),
+                                     cmd -> cmd);
     }
 
     @Override
     public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback<T> callback) {
         executeRetryableWriteAsync(binding, getDatabaseName(), null, getFieldNameValidator(),
-                CommandResultDocumentCodec.create(getDecoder(), "value"),
-                getCommandCreator(binding.getSessionContext()), FindAndModifyHelper.asyncTransformer(), cmd -> cmd, callback);
+                                   CommandResultDocumentCodec.create(getDecoder(), "value"),
+                                   getCommandCreator(),
+                FindAndModifyHelper.asyncTransformer(), cmd -> cmd, callback);
     }
 
     public MongoNamespace getNamespace() {
@@ -124,17 +119,6 @@ public BaseFindAndModifyOperation<T> projection(@Nullable final BsonDocument pro
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, MILLISECONDS);
-    }
-
-    public BaseFindAndModifyOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public BsonDocument getSort() {
         return sort;
     }
@@ -196,8 +180,10 @@ public BaseFindAndModifyOperation<T> let(@Nullable final BsonDocument variables)
 
     protected abstract void specializeCommand(BsonDocument initialCommand, ConnectionDescription connectionDescription);
 
-    private CommandCreator getCommandCreator(final SessionContext sessionContext) {
-        return (serverDescription, connectionDescription) -> {
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            SessionContext sessionContext = operationContext.getSessionContext();
+
             BsonDocument commandDocument = new BsonDocument("findAndModify", new BsonString(getNamespace().getCollectionName()));
             putIfNotNull(commandDocument, "query", getFilter());
             putIfNotNull(commandDocument, "fields", getProjection());
@@ -205,8 +191,8 @@ private CommandCreator getCommandCreator(final SessionContext sessionContext) {
 
             specializeCommand(commandDocument, connectionDescription);
 
-            putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS));
-            if (getWriteConcern().isAcknowledged() && !getWriteConcern().isServerDefault() && !sessionContext.hasActiveTransaction()) {
+            if (getWriteConcern().isAcknowledged() && !getWriteConcern().isServerDefault()
+                    && !sessionContext.hasActiveTransaction()) {
                 commandDocument.put("writeConcern", getWriteConcern().asDocument());
             }
             if (getCollation() != null) {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java
index 6d6a76885be..f1551da3b2d 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java
@@ -33,6 +33,7 @@
 import com.mongodb.internal.bulk.WriteRequestWithIndex;
 import com.mongodb.internal.connection.BulkWriteBatchCombiner;
 import com.mongodb.internal.connection.IndexMap;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.SplittablePayload;
 import com.mongodb.internal.session.SessionContext;
 import com.mongodb.internal.validator.MappedFieldNameValidator;
@@ -90,7 +91,7 @@ public final class BulkWriteBatch {
     private final BsonDocument command;
     private final SplittablePayload payload;
     private final List<WriteRequestWithIndex> unprocessed;
-    private final SessionContext sessionContext;
+    private final OperationContext operationContext;
     private final BsonValue comment;
     private final BsonDocument variables;
 
@@ -99,8 +100,9 @@ static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace,
                                                       final boolean ordered, final WriteConcern writeConcern,
                                                       final Boolean bypassDocumentValidation, final boolean retryWrites,
                                                       final List<? extends WriteRequest> writeRequests,
-                                                      final SessionContext sessionContext,
+                                                      final OperationContext operationContext,
                                                       @Nullable final BsonValue comment, @Nullable final BsonDocument variables) {
+        SessionContext sessionContext = operationContext.getSessionContext();
         if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !sessionContext.hasActiveTransaction()
                 && !writeConcern.isAcknowledged()) {
             throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session");
@@ -119,13 +121,13 @@ static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace,
         }
         return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation,
                 canRetryWrites, new BulkWriteBatchCombiner(connectionDescription.getServerAddress(), ordered, writeConcern),
-                writeRequestsWithIndex, sessionContext, comment, variables);
+                writeRequestsWithIndex, operationContext, comment, variables);
     }
 
     private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescription connectionDescription,
                            final boolean ordered, final WriteConcern writeConcern, @Nullable final Boolean bypassDocumentValidation,
                            final boolean retryWrites, final BulkWriteBatchCombiner bulkWriteBatchCombiner,
-                           final List<WriteRequestWithIndex> writeRequestsWithIndices, final SessionContext sessionContext,
+                           final List<WriteRequestWithIndex> writeRequestsWithIndices, final OperationContext operationContext,
                            @Nullable final BsonValue comment, @Nullable final BsonDocument variables) {
         this.namespace = namespace;
         this.connectionDescription = connectionDescription;
@@ -159,11 +161,12 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti
         this.indexMap = indexMap;
         this.unprocessed = unprocessedItems;
         this.payload = new SplittablePayload(getPayloadType(batchType), payloadItems);
-        this.sessionContext = sessionContext;
+        this.operationContext = operationContext;
         this.comment = comment;
         this.variables = variables;
         this.command = new BsonDocument();
 
+        SessionContext sessionContext = operationContext.getSessionContext();
         if (!payloadItems.isEmpty()) {
             command.put(getCommandName(batchType), new BsonString(namespace.getCollectionName()));
             command.put("ordered", new BsonBoolean(ordered));
@@ -185,7 +188,7 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti
                            final boolean ordered, final WriteConcern writeConcern, final Boolean bypassDocumentValidation,
                            final boolean retryWrites, final BulkWriteBatchCombiner bulkWriteBatchCombiner, final IndexMap indexMap,
                            final WriteRequest.Type batchType, final BsonDocument command, final SplittablePayload payload,
-                           final List<WriteRequestWithIndex> unprocessed, final SessionContext sessionContext,
+                           final List<WriteRequestWithIndex> unprocessed, final OperationContext operationContext,
                            @Nullable final BsonValue comment, @Nullable final BsonDocument variables) {
         this.namespace = namespace;
         this.connectionDescription = connectionDescription;
@@ -198,11 +201,11 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti
         this.payload = payload;
         this.unprocessed = unprocessed;
         this.retryWrites = retryWrites;
-        this.sessionContext = sessionContext;
+        this.operationContext = operationContext;
         this.comment = comment;
         this.variables = variables;
         if (retryWrites) {
-            command.put("txnNumber", new BsonInt64(sessionContext.advanceTransactionNumber()));
+            command.put("txnNumber", new BsonInt64(operationContext.getSessionContext().advanceTransactionNumber()));
         }
         this.command = command;
     }
@@ -266,11 +269,11 @@ BulkWriteBatch getNextBatch() {
 
 
             return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, retryWrites,
-                    bulkWriteBatchCombiner, nextIndexMap, batchType, command, payload.getNextSplit(), unprocessed, sessionContext,
+                    bulkWriteBatchCombiner, nextIndexMap, batchType, command, payload.getNextSplit(), unprocessed, operationContext,
                     comment, variables);
         } else {
             return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, retryWrites,
-                    bulkWriteBatchCombiner, unprocessed, sessionContext, comment, variables);
+                    bulkWriteBatchCombiner, unprocessed, operationContext, comment, variables);
         }
     }
 
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java
index a3c134b720c..c4bd72a4775 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java
@@ -18,8 +18,10 @@
 
 import com.mongodb.MongoChangeStreamException;
 import com.mongodb.MongoException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.ServerAddress;
 import com.mongodb.ServerCursor;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
@@ -37,26 +39,50 @@
 import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.isResumableError;
 import static com.mongodb.internal.operation.SyncOperationHelper.withReadConnectionSource;
 
+/**
+ * A change stream cursor that wraps {@link CommandBatchCursor} with automatic resumption capabilities in the event
+ * of timeouts or transient errors.
+ * <p>
+ * Upon encountering a resumable error during {@code hasNext()}, {@code next()}, or {@code tryNext()} calls, the {@link ChangeStreamBatchCursor}
+ * attempts to establish a new change stream on the server.
+ * </p>
+ * If an error that occurs during any of these method calls is not resumable, it is immediately propagated to the caller, and the {@link ChangeStreamBatchCursor}
+ * is closed and invalidated on the server. Server errors that occur during this invalidation process are not propagated to the caller.
+ * <p>
+ * A {@link MongoOperationTimeoutException} does not invalidate the {@link ChangeStreamBatchCursor}, but is immediately propagated to the caller.
+ * Subsequent method calls will attempt to resume the operation by establishing a new change stream on the server, without issuing a
+ * {@code getMore} request first.
+ * </p>
+ */
 final class ChangeStreamBatchCursor<T> implements AggregateResponseBatchCursor<T> {
     private final ReadBinding binding;
     private final ChangeStreamOperation<T> changeStreamOperation;
     private final int maxWireVersion;
-
+    private final TimeoutContext timeoutContext;
     private CommandBatchCursor<RawBsonDocument> wrapped;
     private BsonDocument resumeToken;
     private final AtomicBoolean closed;
 
+    /**
+     * This flag is used to manage change stream resumption logic after a timeout error.
+     * Indicates whether the last {@code hasNext()}, {@code next()}, or {@code tryNext()} call resulted in a {@link MongoOperationTimeoutException}.
+     * If {@code true}, a timeout occurred, and the cursor will attempt to resume the change stream on the subsequent call.
+     */
+    private boolean lastOperationTimedOut;
+
     ChangeStreamBatchCursor(final ChangeStreamOperation<T> changeStreamOperation,
                             final CommandBatchCursor<RawBsonDocument> wrapped,
                             final ReadBinding binding,
                             @Nullable final BsonDocument resumeToken,
                             final int maxWireVersion) {
+        this.timeoutContext = binding.getOperationContext().getTimeoutContext();
         this.changeStreamOperation = changeStreamOperation;
         this.binding = binding.retain();
         this.wrapped = wrapped;
         this.resumeToken = resumeToken;
         this.maxWireVersion = maxWireVersion;
         closed = new AtomicBoolean();
+        lastOperationTimedOut = false;
     }
 
     CommandBatchCursor<RawBsonDocument> getWrapped() {
@@ -107,6 +133,7 @@ public List<T> tryNext() {
     @Override
     public void close() {
         if (!closed.getAndSet(true)) {
+            timeoutContext.resetTimeoutIfPresent();
             wrapped.close();
             binding.release();
         }
@@ -184,22 +211,50 @@ static <T> List<T> convertAndProduceLastId(final List<RawBsonDocument> rawDocume
     }
 
     <R> R resumeableOperation(final Function<AggregateResponseBatchCursor<RawBsonDocument>, R> function) {
+        timeoutContext.resetTimeoutIfPresent();
+        try {
+            R result = execute(function);
+            lastOperationTimedOut = false;
+            return result;
+        } catch (Throwable exception) {
+            lastOperationTimedOut = isTimeoutException(exception);
+            throw exception;
+        }
+    }
+
+    private <R> R execute(final Function<AggregateResponseBatchCursor<RawBsonDocument>, R> function) {
+        boolean shouldBeResumed = hasPreviousNextTimedOut();
         while (true) {
+            if (shouldBeResumed) {
+                resumeChangeStream();
+            }
             try {
                 return function.apply(wrapped);
             } catch (Throwable t) {
                 if (!isResumableError(t, maxWireVersion)) {
                     throw MongoException.fromThrowableNonNull(t);
                 }
+                shouldBeResumed = true;
             }
-            wrapped.close();
-
-            withReadConnectionSource(binding, source -> {
-                changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, source.getServerDescription().getMaxWireVersion());
-                return null;
-            });
-            wrapped = ((ChangeStreamBatchCursor<T>) changeStreamOperation.execute(binding)).getWrapped();
-            binding.release(); // release the new change stream batch cursor's reference to the binding
         }
     }
+
+    private void resumeChangeStream() {
+        wrapped.close();
+
+        withReadConnectionSource(binding, source -> {
+            changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, source.getServerDescription().getMaxWireVersion());
+            return null;
+        });
+        wrapped = ((ChangeStreamBatchCursor<T>) changeStreamOperation.execute(binding)).getWrapped();
+        binding.release(); // release the new change stream batch cursor's reference to the binding
+    }
+
+    private boolean hasPreviousNextTimedOut() {
+        return lastOperationTimedOut && !closed.get();
+    }
+
+    private static boolean isTimeoutException(final Throwable exception) {
+        return exception instanceof MongoOperationTimeoutException;
+    }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java
index 148c988fe48..7cfdd474dda 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java
@@ -22,6 +22,7 @@
 import com.mongodb.MongoException;
 import com.mongodb.MongoInterruptedException;
 import com.mongodb.MongoNotPrimaryException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoSocketException;
 import com.mongodb.internal.VisibleForTesting;
 
@@ -39,7 +40,8 @@ final class ChangeStreamBatchCursorHelper {
     static final String RESUMABLE_CHANGE_STREAM_ERROR_LABEL = "ResumableChangeStreamError";
 
     static boolean isResumableError(final Throwable t, final int maxWireVersion) {
-        if (!(t instanceof MongoException) || (t instanceof MongoChangeStreamException) || (t instanceof MongoInterruptedException)) {
+        if (!(t instanceof MongoException) || (t instanceof MongoChangeStreamException) || (t instanceof MongoInterruptedException)
+            || (t instanceof MongoOperationTimeoutException)) {
             return false;
         } else if (t instanceof MongoNotPrimaryException || t instanceof MongoCursorNotFoundException
                 || t instanceof MongoSocketException | t instanceof MongoClientException) {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java
index 8df093a6e9a..6231e98de12 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java
@@ -16,10 +16,12 @@
 
 package com.mongodb.internal.operation;
 
+import com.mongodb.CursorType;
 import com.mongodb.MongoNamespace;
 import com.mongodb.client.model.Collation;
 import com.mongodb.client.model.changestream.FullDocument;
 import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
@@ -39,10 +41,10 @@
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.client.cursor.TimeoutMode.CURSOR_LIFETIME;
 
 /**
  * An operation that executes an {@code $changeStream} aggregation.
@@ -69,10 +71,10 @@ public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument
     }
 
     public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument fullDocument,
-            final FullDocumentBeforeChange fullDocumentBeforeChange, final List<BsonDocument> pipeline,
-            final Decoder<T> decoder, final ChangeStreamLevel changeStreamLevel) {
-        this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, RAW_BSON_DOCUMENT_CODEC,
-                getAggregateTarget(), getPipelineCreator());
+            final FullDocumentBeforeChange fullDocumentBeforeChange, final List<BsonDocument> pipeline, final Decoder<T> decoder,
+            final ChangeStreamLevel changeStreamLevel) {
+        this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, getAggregateTarget(),
+                getPipelineCreator()).cursorType(CursorType.TailableAwait);
         this.fullDocument = notNull("fullDocument", fullDocument);
         this.fullDocumentBeforeChange = notNull("fullDocumentBeforeChange", fullDocumentBeforeChange);
         this.decoder = notNull("decoder", decoder);
@@ -122,15 +124,6 @@ public ChangeStreamOperation<T> batchSize(@Nullable final Integer batchSize) {
         return this;
     }
 
-    public long getMaxAwaitTime(final TimeUnit timeUnit) {
-        return wrapped.getMaxAwaitTime(timeUnit);
-    }
-
-    public ChangeStreamOperation<T> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        wrapped.maxAwaitTime(maxAwaitTime, timeUnit);
-        return this;
-    }
-
     public Collation getCollation() {
         return wrapped.getCollation();
     }
@@ -177,9 +170,34 @@ public ChangeStreamOperation<T> showExpandedEvents(final boolean showExpandedEve
         return this;
     }
 
+    /**
+     * Gets an aggregate operation with consideration for timeout settings.
+     * <p>
+     * Change streams act similarly to tailable awaitData cursors, with identical timeoutMS option behavior.
+     * Key distinctions include:
+     * - The timeoutMS option must be applied at the start of the aggregate operation for change streams.
+     * - Change streams support resumption on next() calls. The driver handles automatic resumption for transient errors.
+     * <p>
+     *
+     * As a result, when {@code timeoutContext.hasTimeoutMS()}, the {@code CURSOR_LIFETIME} setting is utilized to manage the underlying
+     * cursor's lifespan in change streams.
+     *
+     * @param timeoutContext the timeout context from the operation's binding, used to determine whether timeoutMS is set
+     * @return the wrapped {@code AggregateOperationImpl}, with the {@code CURSOR_LIFETIME} timeout mode applied when timeoutMS is set
+     */
+    private AggregateOperationImpl<RawBsonDocument> getAggregateOperation(final TimeoutContext timeoutContext) {
+        if (timeoutContext.hasTimeoutMS()) {
+            return wrapped.timeoutMode(CURSOR_LIFETIME);
+        }
+        return wrapped;
+    }
+
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
-        CommandBatchCursor<RawBsonDocument> cursor = (CommandBatchCursor<RawBsonDocument>) wrapped.execute(binding);
+        TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext();
+        CommandBatchCursor<RawBsonDocument> cursor = (CommandBatchCursor<RawBsonDocument>) getAggregateOperation(timeoutContext).execute(binding);
+        cursor.setCloseWithoutTimeoutReset(true);
+
             return new ChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding,
                     setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(),
                             cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion());
@@ -187,11 +205,14 @@ public BatchCursor<T> execute(final ReadBinding binding) {
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) {
-        wrapped.executeAsync(binding, (result, t) -> {
+        TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext();
+        getAggregateOperation(timeoutContext).executeAsync(binding, (result, t) -> {
             if (t != null) {
                 callback.onResult(null, t);
             } else {
                 AsyncCommandBatchCursor<RawBsonDocument> cursor = (AsyncCommandBatchCursor<RawBsonDocument>) assertNotNull(result);
+                cursor.setCloseWithoutTimeoutReset(true);
+
                 callback.onResult(new AsyncChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding,
                         setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(),
                                 cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()), null);
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java
index f71cce0527b..410098db2c0 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java
@@ -19,16 +19,20 @@
 import com.mongodb.MongoCommandException;
 import com.mongodb.MongoException;
 import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoSocketException;
 import com.mongodb.ReadPreference;
 import com.mongodb.ServerAddress;
 import com.mongodb.ServerCursor;
 import com.mongodb.annotations.ThreadSafe;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ServerType;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.binding.ConnectionSource;
 import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonTimestamp;
@@ -57,7 +61,6 @@
 class CommandBatchCursor<T> implements AggregateResponseBatchCursor<T> {
 
     private final MongoNamespace namespace;
-    private final long maxTimeMS;
     private final Decoder<T> decoder;
     @Nullable
     private final BsonValue comment;
@@ -71,6 +74,7 @@ class CommandBatchCursor<T> implements AggregateResponseBatchCursor<T> {
     private List<T> nextBatch;
 
     CommandBatchCursor(
+            final TimeoutMode timeoutMode,
             final BsonDocument commandCursorDocument,
             final int batchSize, final long maxTimeMS,
             final Decoder<T> decoder,
@@ -81,14 +85,16 @@ class CommandBatchCursor<T> implements AggregateResponseBatchCursor<T> {
         this.commandCursorResult = toCommandCursorResult(connectionDescription.getServerAddress(), FIRST_BATCH, commandCursorDocument);
         this.namespace = commandCursorResult.getNamespace();
         this.batchSize = batchSize;
-        this.maxTimeMS = maxTimeMS;
         this.decoder = decoder;
         this.comment = comment;
         this.maxWireVersion = connectionDescription.getMaxWireVersion();
         this.firstBatchEmpty = commandCursorResult.getResults().isEmpty();
 
+        connectionSource.getOperationContext().getTimeoutContext().setMaxTimeOverride(maxTimeMS);
+
         Connection connectionToPin = connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER ? connection : null;
-        resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor());
+        resourceManager = new ResourceManager(timeoutMode, namespace, connectionSource, connectionToPin,
+                commandCursorResult.getServerCursor());
     }
 
     @Override
@@ -101,6 +107,7 @@ private boolean doHasNext() {
             return true;
         }
 
+        resourceManager.checkTimeoutModeAndResetTimeoutContextIfIteration();
         while (resourceManager.getServerCursor() != null) {
             getMore();
             if (!resourceManager.operable()) {
@@ -229,12 +236,11 @@ private void getMore() {
                 this.commandCursorResult = toCommandCursorResult(connection.getDescription().getServerAddress(), NEXT_BATCH,
                         assertNotNull(
                             connection.command(namespace.getDatabaseName(),
-                                 getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize,
-                                         maxTimeMS, comment),
+                                 getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment),
                                  NO_OP_FIELD_NAME_VALIDATOR,
                                  ReadPreference.primary(),
                                  CommandResultDocumentCodec.create(decoder, NEXT_BATCH),
-                                 assertNotNull(resourceManager.getConnectionSource()))));
+                                 assertNotNull(resourceManager.getConnectionSource()).getOperationContext())));
                 nextServerCursor = commandCursorResult.getServerCursor();
             } catch (MongoCommandException e) {
                 throw translateCommandException(e, serverCursor);
@@ -252,15 +258,27 @@ private CommandCursorResult<T> toCommandCursorResult(final ServerAddress serverA
         return commandCursorResult;
     }
 
+    /**
+     * Configures the cursor's behavior to close without resetting its timeout. If {@code true}, the cursor attempts to close immediately
+     * without resetting its {@link TimeoutContext#getTimeout()} if present. This is useful when managing the cursor's close behavior externally.
+     *
+     * @param closeWithoutTimeoutReset true if the cursor should close without resetting the timeout; false otherwise
+     */
+    void setCloseWithoutTimeoutReset(final boolean closeWithoutTimeoutReset) {
+        this.resourceManager.setCloseWithoutTimeoutReset(closeWithoutTimeoutReset);
+    }
+
     @ThreadSafe
     private static final class ResourceManager extends CursorResourceManager<ConnectionSource, Connection> {
 
         ResourceManager(
+                final TimeoutMode timeoutMode,
                 final MongoNamespace namespace,
                 final ConnectionSource connectionSource,
                 @Nullable final Connection connectionToPin,
                 @Nullable final ServerCursor serverCursor) {
-            super(namespace, connectionSource, connectionToPin, serverCursor);
+            super(connectionSource.getOperationContext().getTimeoutContext(), timeoutMode, namespace, connectionSource, connectionToPin,
+                    serverCursor);
         }
 
         /**
@@ -291,6 +309,7 @@ void doClose() {
             if (isSkipReleasingServerResourcesOnClose()) {
                 unsetServerCursor();
             }
+            resetTimeout();
             try {
                 if (getServerCursor() != null) {
                     Connection connection = getConnection();
@@ -316,6 +335,12 @@ void executeWithConnection(final Consumer<Connection> action) {
             } catch (MongoSocketException e) {
                 onCorruptedConnection(connection, e);
                 throw e;
+            } catch (MongoOperationTimeoutException e) {
+                Throwable cause = e.getCause();
+                if (cause instanceof MongoSocketException) {
+                    onCorruptedConnection(connection, (MongoSocketException) cause);
+                }
+                throw e;
             } finally {
                 connection.release();
             }
@@ -344,9 +369,12 @@ private void releaseServerResources(final Connection connection) {
 
         private void killServerCursor(final MongoNamespace namespace, final ServerCursor localServerCursor,
                 final Connection localConnection) {
+            OperationContext operationContext = assertNotNull(getConnectionSource()).getOperationContext();
+            TimeoutContext timeoutContext = operationContext.getTimeoutContext();
+            timeoutContext.resetToDefaultMaxTime();
+
             localConnection.command(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor),
-                    NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(),
-                    assertNotNull(getConnectionSource()));
+                    NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), operationContext);
         }
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java
index eaf03c68ec3..cd7d2468e7f 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java
@@ -51,16 +51,13 @@ final class CommandBatchCursorHelper {
 
     static BsonDocument getMoreCommandDocument(
             final long cursorId, final ConnectionDescription connectionDescription, final MongoNamespace namespace, final int batchSize,
-            final long maxTimeMS, @Nullable final BsonValue comment) {
+            @Nullable final BsonValue comment) {
         BsonDocument document = new BsonDocument("getMore", new BsonInt64(cursorId))
                 .append("collection", new BsonString(namespace.getCollectionName()));
 
         if (batchSize != 0) {
             document.append("batchSize", new BsonInt32(batchSize));
         }
-        if (maxTimeMS != 0) {
-            document.append("maxTimeMS", new BsonInt64(maxTimeMS));
-        }
         if (serverIsAtLeastVersionFourDotFour(connectionDescription)) {
             putIfNotNull(document, "comment", comment);
         }
@@ -76,7 +73,7 @@ static <T> CommandCursorResult<T> logCommandCursorResult(final CommandCursorResu
     }
 
     static BsonDocument getKillCursorsCommand(final MongoNamespace namespace, final ServerCursor serverCursor) {
-        return new BsonDocument("killCursors", new BsonString(namespace.getCollectionName()))
+        return new BsonDocument("killCursors", new BsonString(namespace.getCollectionName()))
                 .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId()))));
     }
 
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java
index 3f47ba06f89..4c428131853 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java
@@ -28,6 +28,7 @@
 import com.mongodb.assertions.Assertions;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.function.RetryState;
 import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException;
@@ -47,9 +48,11 @@
 @SuppressWarnings("overloads")
 final class CommandOperationHelper {
 
-
     interface CommandCreator {
-        BsonDocument create(ServerDescription serverDescription, ConnectionDescription connectionDescription);
+        BsonDocument create(
+                OperationContext operationContext,
+                ServerDescription serverDescription,
+                ConnectionDescription connectionDescription);
     }
 
     static BinaryOperator<Throwable> onRetryableReadAttemptFailure(final OperationContext operationContext) {
@@ -96,8 +99,11 @@ private static Throwable chooseRetryableWriteException(
 
     /* Read Binding Helpers */
 
-    static RetryState initialRetryState(final boolean retry) {
-        return new RetryState(retry ? RetryState.RETRIES : 0);
+    static RetryState initialRetryState(final boolean retry, final TimeoutContext timeoutContext) {
+        if (retry) {
+            return RetryState.withRetryableState(RetryState.RETRIES, timeoutContext);
+        }
+        return RetryState.withNonRetryableState();
     }
 
     private static final List<Integer> RETRYABLE_ERROR_CODES = asList(6, 7, 89, 91, 189, 262, 9001, 13436, 13435, 11602, 11600, 10107);
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java
index 47b807f91ec..ea89dfb303e 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java
@@ -34,27 +34,28 @@
  */
 public class CommandReadOperation<T> implements AsyncReadOperation<T>, ReadOperation<T> {
     private final String databaseName;
-    private final BsonDocument command;
+    private final CommandCreator commandCreator;
     private final Decoder<T> decoder;
 
     public CommandReadOperation(final String databaseName, final BsonDocument command, final Decoder<T> decoder) {
+        this(databaseName, (operationContext, serverDescription, connectionDescription) -> command, decoder);
+    }
+
+    public CommandReadOperation(final String databaseName, final CommandCreator commandCreator, final Decoder<T> decoder) {
         this.databaseName = notNull("databaseName", databaseName);
-        this.command = notNull("command", command);
+        this.commandCreator = notNull("commandCreator", commandCreator);
         this.decoder = notNull("decoder", decoder);
     }
 
     @Override
     public T execute(final ReadBinding binding) {
-        return executeRetryableRead(binding, databaseName, getCommandCreator(), decoder, (result, source, connection) -> result, false);
+        return executeRetryableRead(binding, databaseName, commandCreator, decoder,
+                                    (result, source, connection) -> result, false);
     }
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<T> callback) {
-        executeRetryableReadAsync(binding, databaseName, getCommandCreator(), decoder, (result, source, connection) -> result,
-                false, callback);
-    }
-
-    private CommandCreator getCommandCreator() {
-        return (serverDescription, connectionDescription) -> command;
+        executeRetryableReadAsync(binding, databaseName, commandCreator, decoder,
+                                  (result, source, connection) -> result, false, callback);
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java
index 92779bc61ae..6c2338d47de 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java
@@ -25,20 +25,16 @@
 import com.mongodb.MongoTimeoutException;
 import com.mongodb.MongoWriteConcernException;
 import com.mongodb.WriteConcern;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncWriteBinding;
 import com.mongodb.internal.binding.WriteBinding;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
-import org.bson.BsonInt32;
-import org.bson.BsonInt64;
 
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL;
-import static com.mongodb.assertions.Assertions.isTrueArgument;
-import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
 import static com.mongodb.internal.operation.CommandOperationHelper.RETRYABLE_WRITE_ERROR_LABEL;
 import static java.util.Arrays.asList;
@@ -52,7 +48,6 @@
 public class CommitTransactionOperation extends TransactionOperation {
     private final boolean alreadyCommitted;
     private BsonDocument recoveryToken;
-    private Long maxCommitTimeMS;
 
     public CommitTransactionOperation(final WriteConcern writeConcern) {
         this(writeConcern, false);
@@ -68,26 +63,6 @@ public CommitTransactionOperation recoveryToken(@Nullable final BsonDocument rec
         return this;
     }
 
-    public CommitTransactionOperation maxCommitTime(@Nullable final Long maxCommitTime, final TimeUnit timeUnit) {
-        if (maxCommitTime == null) {
-            this.maxCommitTimeMS = null;
-        } else {
-            notNull("timeUnit", timeUnit);
-            isTrueArgument("maxCommitTime > 0", maxCommitTime > 0);
-            this.maxCommitTimeMS = MILLISECONDS.convert(maxCommitTime, timeUnit);
-        }
-        return this;
-    }
-
-    @Nullable
-    public Long getMaxCommitTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        if (maxCommitTimeMS == null) {
-            return null;
-        }
-        return timeUnit.convert(maxCommitTimeMS, MILLISECONDS);
-    }
-
     @Override
     public Void execute(final WriteBinding binding) {
         try {
@@ -143,29 +118,29 @@ protected String getCommandName() {
 
     @Override
     CommandCreator getCommandCreator() {
-        CommandCreator creator = (serverDescription, connectionDescription) -> {
-            BsonDocument command = CommitTransactionOperation.super.getCommandCreator().create(serverDescription,
-                    connectionDescription);
-            if (maxCommitTimeMS != null) {
-                command.append("maxTimeMS",
-                        maxCommitTimeMS > Integer.MAX_VALUE
-                        ? new BsonInt64(maxCommitTimeMS) : new BsonInt32(maxCommitTimeMS.intValue()));
-            }
+        CommandCreator creator = (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument command = CommitTransactionOperation.super.getCommandCreator()
+                    .create(operationContext, serverDescription, connectionDescription);
+            operationContext.getTimeoutContext().setMaxTimeOverrideToMaxCommitTime();
             return command;
         };
         if (alreadyCommitted) {
-            return (serverDescription, connectionDescription) -> getRetryCommandModifier().apply(creator.create(serverDescription, connectionDescription));
+            return (operationContext, serverDescription, connectionDescription) ->
+                    getRetryCommandModifier(operationContext.getTimeoutContext())
+                            .apply(creator.create(operationContext, serverDescription, connectionDescription));
         } else if (recoveryToken != null) {
-                return (serverDescription, connectionDescription) -> creator.create(serverDescription, connectionDescription).append("recoveryToken", recoveryToken);
+                return (operationContext, serverDescription, connectionDescription) ->
+                        creator.create(operationContext, serverDescription, connectionDescription)
+                                .append("recoveryToken", recoveryToken);
         }
         return creator;
     }
 
     @Override
-    protected Function<BsonDocument, BsonDocument> getRetryCommandModifier() {
+    protected Function<BsonDocument, BsonDocument> getRetryCommandModifier(final TimeoutContext timeoutContext) {
         return command -> {
             WriteConcern retryWriteConcern = getWriteConcern().withW("majority");
-            if (retryWriteConcern.getWTimeout(MILLISECONDS) == null) {
+            if (retryWriteConcern.getWTimeout(MILLISECONDS) == null && !timeoutContext.hasTimeoutMS()) {
                 retryWriteConcern = retryWriteConcern.withWTimeout(10000, MILLISECONDS);
             }
             command.put("writeConcern", retryWriteConcern.asDocument());
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java
index 5cdb974b7c0..1095dd44508 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java
@@ -31,7 +31,6 @@
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.notNull;
 
@@ -47,13 +46,13 @@ public class CountDocumentsOperation implements AsyncReadOperation<Long>, ReadOp
     private BsonValue comment;
     private long skip;
     private long limit;
-    private long maxTimeMS;
     private Collation collation;
 
     public CountDocumentsOperation(final MongoNamespace namespace) {
         this.namespace = notNull("namespace", namespace);
     }
 
+    @Nullable
     public BsonDocument getFilter() {
         return filter;
     }
@@ -72,6 +71,7 @@ public boolean getRetryReads() {
         return retryReads;
     }
 
+    @Nullable
     public BsonValue getHint() {
         return hint;
     }
@@ -99,17 +99,7 @@ public CountDocumentsOperation skip(final long skip) {
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public CountDocumentsOperation maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
+    @Nullable
     public Collation getCollation() {
         return collation;
     }
@@ -131,8 +121,9 @@ public CountDocumentsOperation comment(@Nullable final BsonValue comment) {
 
     @Override
     public Long execute(final ReadBinding binding) {
-        BatchCursor<BsonDocument> cursor = getAggregateOperation().execute(binding);
-        return cursor.hasNext() ? getCountFromAggregateResults(cursor.next()) : 0;
+        try (BatchCursor<BsonDocument> cursor = getAggregateOperation().execute(binding)) {
+            return cursor.hasNext() ? getCountFromAggregateResults(cursor.next()) : 0;
+        }
     }
 
     @Override
@@ -157,8 +148,7 @@ private AggregateOperation<BsonDocument> getAggregateOperation() {
                 .retryReads(retryReads)
                 .collation(collation)
                 .comment(comment)
-                .hint(hint)
-                .maxTime(maxTimeMS, TimeUnit.MILLISECONDS);
+                .hint(hint);
     }
 
     private List<BsonDocument> getPipeline() {
@@ -175,7 +165,7 @@ private List<BsonDocument> getPipeline() {
         return pipeline;
     }
 
-    private Long getCountFromAggregateResults(final List<BsonDocument> results) {
+    private Long getCountFromAggregateResults(@Nullable final List<BsonDocument> results) {
         if (results == null || results.isEmpty()) {
             return 0L;
         } else {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java
index 43298bae4bf..f9aa0a8eaa2 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java
@@ -18,11 +18,9 @@
 
 import com.mongodb.MongoNamespace;
 import com.mongodb.client.model.Collation;
-import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonString;
@@ -30,8 +28,6 @@
 import org.bson.codecs.BsonDocumentCodec;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
-
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync;
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync;
@@ -53,7 +49,6 @@ public class CountOperation implements AsyncReadOperation<Long>, ReadOperation<L
     private BsonValue hint;
     private long skip;
     private long limit;
-    private long maxTimeMS;
     private Collation collation;
 
     public CountOperation(final MongoNamespace namespace) {
@@ -105,17 +100,6 @@ public CountOperation skip(final long skip) {
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public CountOperation maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public Collation getCollation() {
         return collation;
     }
@@ -128,13 +112,13 @@ public CountOperation collation(@Nullable final Collation collation) {
     @Override
     public Long execute(final ReadBinding binding) {
         return executeRetryableRead(binding, namespace.getDatabaseName(),
-                getCommandCreator(binding.getSessionContext()), DECODER, transformer(), retryReads);
+                                    getCommandCreator(), DECODER, transformer(), retryReads);
     }
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<Long> callback) {
-        executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), DECODER,
-                asyncTransformer(), retryReads, callback);
+        executeRetryableReadAsync(binding, namespace.getDatabaseName(),
+                                  getCommandCreator(), DECODER, asyncTransformer(), retryReads, callback);
     }
 
     private CommandReadTransformer<BsonDocument, Long> transformer() {
@@ -145,24 +129,21 @@ private CommandReadTransformerAsync<BsonDocument, Long> asyncTransformer() {
         return (result, source, connection) -> (result.getNumber("n")).longValue();
     }
 
-    private CommandCreator getCommandCreator(final SessionContext sessionContext) {
-        return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription);
-    }
-
-    private BsonDocument getCommand(final SessionContext sessionContext, final ConnectionDescription connectionDescription) {
-        BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName()));
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName()));
 
-        appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), document);
+            appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), document);
 
-        putIfNotNull(document, "query", filter);
-        putIfNotZero(document, "limit", limit);
-        putIfNotZero(document, "skip", skip);
-        putIfNotNull(document, "hint", hint);
-        putIfNotZero(document, "maxTimeMS", maxTimeMS);
+            putIfNotNull(document, "query", filter);
+            putIfNotZero(document, "limit", limit);
+            putIfNotZero(document, "skip", skip);
+            putIfNotNull(document, "hint", hint);
 
-        if (collation != null) {
-            document.put("collation", collation.asDocument());
-        }
-        return document;
+            if (collation != null) {
+                document.put("collation", collation.asDocument());
+            }
+            return document;
+        };
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java
index c78fee6838e..d9a11d20287 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java
@@ -92,10 +92,6 @@ public class CreateCollectionOperation implements AsyncWriteOperation<Void>, Wri
     private String clusteredIndexName;
     private BsonDocument encryptedFields;
 
-    public CreateCollectionOperation(final String databaseName, final String collectionName) {
-        this(databaseName, collectionName, null);
-    }
-
     public CreateCollectionOperation(final String databaseName, final String collectionName, @Nullable final WriteConcern writeConcern) {
         this.databaseName = notNull("databaseName", databaseName);
         this.collectionName = notNull("collectionName", collectionName);
@@ -241,7 +237,7 @@ public Void execute(final WriteBinding binding) {
             checkEncryptedFieldsSupported(connection.getDescription());
             getCommandFunctions().forEach(commandCreator ->
                executeCommand(binding, databaseName, commandCreator.get(), connection,
-                      writeConcernErrorTransformer())
+                      writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext()))
             );
             return null;
         });
@@ -425,7 +421,7 @@ public void onResult(@Nullable final Void result, @Nullable final Throwable t) {
                 finalCallback.onResult(null, null);
             } else {
                 executeCommandAsync(binding, databaseName, nextCommandFunction.get(),
-                        connection, writeConcernErrorTransformerAsync(), this);
+                        connection, writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), this);
             }
         }
     }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java
index f3aae267b62..76de0757ff1 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java
@@ -25,7 +25,6 @@
 import com.mongodb.MongoNamespace;
 import com.mongodb.WriteConcern;
 import com.mongodb.WriteConcernResult;
-import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncWriteBinding;
 import com.mongodb.internal.binding.WriteBinding;
@@ -44,19 +43,12 @@
 import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
-import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
-import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync;
-import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback;
-import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection;
 import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync;
-import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero;
 import static com.mongodb.internal.operation.IndexHelper.generateIndexName;
-import static com.mongodb.internal.operation.OperationHelper.LOGGER;
 import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand;
-import static com.mongodb.internal.operation.SyncOperationHelper.withConnection;
 import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer;
 import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand;
 
@@ -69,13 +61,8 @@ public class CreateIndexesOperation implements AsyncWriteOperation<Void>, WriteO
     private final MongoNamespace namespace;
     private final List<IndexRequest> requests;
     private final WriteConcern writeConcern;
-    private long maxTimeMS;
     private CreateIndexCommitQuorum commitQuorum;
 
-    public CreateIndexesOperation(final MongoNamespace namespace, final List<IndexRequest> requests) {
-        this(namespace, requests, null);
-    }
-
     public CreateIndexesOperation(final MongoNamespace namespace, final List<IndexRequest> requests,
             @Nullable final WriteConcern writeConcern) {
         this.namespace = notNull("namespace", namespace);
@@ -103,18 +90,6 @@ public List<String> getIndexNames() {
         return indexNames;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public CreateIndexesOperation maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        isTrueArgument("maxTime >= 0", maxTime >= 0);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public CreateIndexCommitQuorum getCommitQuorum() {
         return commitQuorum;
     }
@@ -126,34 +101,25 @@ public CreateIndexesOperation commitQuorum(@Nullable final CreateIndexCommitQuor
 
     @Override
     public Void execute(final WriteBinding binding) {
-        return withConnection(binding, connection -> {
-            try {
-                executeCommand(binding, namespace.getDatabaseName(), getCommand(connection.getDescription()),
-                        connection, writeConcernErrorTransformer());
-            } catch (MongoCommandException e) {
-                throw checkForDuplicateKeyError(e);
-            }
-            return null;
-        });
+        try {
+            return executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformer(
+                    binding.getOperationContext().getTimeoutContext()));
+        } catch (MongoCommandException e) {
+            throw checkForDuplicateKeyError(e);
+        }
     }
 
     @Override
     public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback<Void> callback) {
-        withAsyncConnection(binding, (connection, t) -> {
-            SingleResultCallback<Void> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
-            if (t != null) {
-                errHandlingCallback.onResult(null, t);
-            } else {
-                SingleResultCallback<Void> wrappedCallback = releasingCallback(errHandlingCallback, connection);
-                try {
-                    executeCommandAsync(binding, namespace.getDatabaseName(),
-                            getCommand(connection.getDescription()), connection, writeConcernErrorTransformerAsync(),
-                            (result, t12) -> wrappedCallback.onResult(null, translateException(t12)));
-                } catch (Throwable t1) {
-                    wrappedCallback.onResult(null, t1);
-                }
-            }
-        });
+        executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformerAsync(binding
+                        .getOperationContext().getTimeoutContext()),
+                ((result, t) -> {
+                    if (t != null) {
+                        callback.onResult(null, translateException(t));
+                    } else {
+                        callback.onResult(result, null);
+                    }
+                }));
     }
 
     @SuppressWarnings("deprecation")
@@ -221,24 +187,25 @@ private BsonDocument getIndex(final IndexRequest request) {
         return index;
     }
 
-    private BsonDocument getCommand(final ConnectionDescription description) {
-        BsonDocument command = new BsonDocument("createIndexes", new BsonString(namespace.getCollectionName()));
-        List<BsonDocument> values = new ArrayList<>();
-        for (IndexRequest request : requests) {
-            values.add(getIndex(request));
-        }
-        command.put("indexes", new BsonArray(values));
-        putIfNotZero(command, "maxTimeMS", maxTimeMS);
-        appendWriteConcernToCommand(writeConcern, command);
-        if (commitQuorum != null) {
-            if (serverIsAtLeastVersionFourDotFour(description)) {
-                command.put("commitQuorum", commitQuorum.toBsonValue());
-            } else {
-                throw new MongoClientException("Specifying a value for the create index commit quorum option "
-                        + "requires a minimum MongoDB version of 4.4");
+    private CommandOperationHelper.CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument command = new BsonDocument("createIndexes", new BsonString(namespace.getCollectionName()));
+            List<BsonDocument> values = new ArrayList<>();
+            for (IndexRequest request : requests) {
+                values.add(getIndex(request));
             }
-        }
-        return command;
+            command.put("indexes", new BsonArray(values));
+            appendWriteConcernToCommand(writeConcern, command);
+            if (commitQuorum != null) {
+                if (serverIsAtLeastVersionFourDotFour(connectionDescription)) {
+                    command.put("commitQuorum", commitQuorum.toBsonValue());
+                } else {
+                    throw new MongoClientException("Specifying a value for the create index commit quorum option "
+                            + "requires a minimum MongoDB version of 4.4");
+                }
+            }
+            return command;
+        };
     }
 
     @Nullable
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java
index 8d1e98de6b8..3636db08593 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java
@@ -55,7 +55,7 @@ public class CreateViewOperation implements AsyncWriteOperation<Void>, WriteOper
     private Collation collation;
 
     public CreateViewOperation(final String databaseName, final String viewName, final String viewOn, final List<BsonDocument> pipeline,
-                               final WriteConcern writeConcern) {
+            final WriteConcern writeConcern) {
         this.databaseName = notNull("databaseName", databaseName);
         this.viewName = notNull("viewName", viewName);
         this.viewOn = notNull("viewOn", viewOn);
@@ -127,7 +127,7 @@ public CreateViewOperation collation(@Nullable final Collation collation) {
     public Void execute(final WriteBinding binding) {
         return withConnection(binding, connection -> {
             executeCommand(binding, databaseName, getCommand(), new BsonDocumentCodec(),
-                    writeConcernErrorTransformer());
+                    writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext()));
             return null;
         });
     }
@@ -140,7 +140,8 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall
                 errHandlingCallback.onResult(null, t);
             } else {
                 SingleResultCallback<Void> wrappedCallback = releasingCallback(errHandlingCallback, connection);
-                executeCommandAsync(binding, databaseName, getCommand(), connection, writeConcernErrorTransformerAsync(),
+                executeCommandAsync(binding, databaseName, getCommand(), connection,
+                        writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()),
                         wrappedCallback);
             }
         });
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java
index 7aeaad49118..78529cfda44 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java
@@ -20,6 +20,8 @@
 import com.mongodb.MongoSocketException;
 import com.mongodb.ServerCursor;
 import com.mongodb.annotations.ThreadSafe;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.binding.ReferenceCounted;
 import com.mongodb.internal.connection.Connection;
 import com.mongodb.lang.Nullable;
@@ -54,6 +56,8 @@
 @ThreadSafe
 abstract class CursorResourceManager<CS extends ReferenceCounted, C extends ReferenceCounted> {
     private final Lock lock;
+    private final TimeoutContext timeoutContext;
+    private final TimeoutMode timeoutMode;
     private final MongoNamespace namespace;
     private volatile State state;
     @Nullable
@@ -63,13 +67,18 @@ abstract class CursorResourceManager<CS extends ReferenceCounted, C extends Refe
     @Nullable
     private volatile ServerCursor serverCursor;
     private volatile boolean skipReleasingServerResourcesOnClose;
+    private boolean closeWithoutTimeoutReset;
 
     CursorResourceManager(
+            final TimeoutContext timeoutContext,
+            final TimeoutMode timeoutMode,
             final MongoNamespace namespace,
             final CS connectionSource,
             @Nullable final C connectionToPin,
             @Nullable final ServerCursor serverCursor) {
         this.lock = new ReentrantLock();
+        this.timeoutContext = timeoutContext;
+        this.timeoutMode = timeoutMode;
         this.namespace = namespace;
         this.state = State.IDLE;
         if (serverCursor != null) {
@@ -83,6 +92,7 @@ abstract class CursorResourceManager<CS extends ReferenceCounted, C extends Refe
         }
         this.skipReleasingServerResourcesOnClose = false;
         this.serverCursor = serverCursor;
+        this.closeWithoutTimeoutReset = false;
     }
 
     /**
@@ -125,6 +135,22 @@ boolean isSkipReleasingServerResourcesOnClose() {
     @SuppressWarnings("SameParameterValue")
     abstract void markAsPinned(C connectionToPin, Connection.PinningMode pinningMode);
 
+    void checkTimeoutModeAndResetTimeoutContextIfIteration() {
+        if (timeoutMode == TimeoutMode.ITERATION) {
+            resetTimeout();
+        }
+    }
+
+    void resetTimeout() {
+        if (!closeWithoutTimeoutReset) {
+            timeoutContext.resetTimeoutIfPresent();
+        }
+    }
+
+    void setCloseWithoutTimeoutReset(final boolean closeImmediately) {
+        this.closeWithoutTimeoutReset = closeImmediately;
+    }
+
     /**
      * Thread-safe.
      */
diff --git a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java
index d9fa0cfd72e..ee11ee5cce1 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java
@@ -18,12 +18,10 @@
 
 import com.mongodb.MongoNamespace;
 import com.mongodb.client.model.Collation;
-import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonString;
@@ -31,15 +29,12 @@
 import org.bson.codecs.Codec;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
-
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.operation.AsyncOperationHelper.asyncSingleBatchCursorTransformer;
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync;
 import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
 import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull;
-import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero;
 import static com.mongodb.internal.operation.OperationHelper.LOGGER;
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead;
@@ -52,13 +47,11 @@
  */
 public class DistinctOperation<T> implements AsyncReadOperation<AsyncBatchCursor<T>>, ReadOperation<BatchCursor<T>> {
     private static final String VALUES = "values";
-
     private final MongoNamespace namespace;
     private final String fieldName;
     private final Decoder<T> decoder;
     private boolean retryReads;
     private BsonDocument filter;
-    private long maxTimeMS;
     private Collation collation;
     private BsonValue comment;
 
@@ -86,17 +79,6 @@ public boolean getRetryReads() {
         return retryReads;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public DistinctOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public Collation getCollation() {
         return collation;
     }
@@ -117,34 +99,32 @@ public DistinctOperation<T> comment(final BsonValue comment) {
 
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
-        return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
-                createCommandDecoder(), singleBatchCursorTransformer(VALUES), retryReads);
+        return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(),
+                singleBatchCursorTransformer(VALUES), retryReads);
     }
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) {
-        executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
-                createCommandDecoder(), asyncSingleBatchCursorTransformer(VALUES), retryReads, errorHandlingCallback(callback, LOGGER));
+        executeRetryableReadAsync(binding, namespace.getDatabaseName(),
+                                  getCommandCreator(), createCommandDecoder(), asyncSingleBatchCursorTransformer(VALUES), retryReads,
+                                  errorHandlingCallback(callback, LOGGER));
     }
 
     private Codec<BsonDocument> createCommandDecoder() {
         return CommandResultDocumentCodec.create(decoder, VALUES);
     }
 
-    private CommandCreator getCommandCreator(final SessionContext sessionContext) {
-        return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription);
-    }
-
-    private BsonDocument getCommand(final SessionContext sessionContext, final ConnectionDescription connectionDescription) {
-        BsonDocument commandDocument = new BsonDocument("distinct", new BsonString(namespace.getCollectionName()));
-        appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), commandDocument);
-        commandDocument.put("key", new BsonString(fieldName));
-        putIfNotNull(commandDocument, "query", filter);
-        putIfNotZero(commandDocument, "maxTimeMS", maxTimeMS);
-        if (collation != null) {
-            commandDocument.put("collation", collation.asDocument());
-        }
-        putIfNotNull(commandDocument, "comment", comment);
-        return commandDocument;
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument commandDocument = new BsonDocument("distinct", new BsonString(namespace.getCollectionName()));
+            appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), commandDocument);
+            commandDocument.put("key", new BsonString(fieldName));
+            putIfNotNull(commandDocument, "query", filter);
+            if (collation != null) {
+                commandDocument.put("collation", collation.asDocument());
+            }
+            putIfNotNull(commandDocument, "comment", comment);
+            return commandDocument;
+        };
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java
index d0e73948339..46a66fcf28e 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java
@@ -59,6 +59,12 @@ static void putIfNotNull(final BsonDocument command, final String key, @Nullable
         }
     }
 
+    static void putIfNotNull(final BsonDocument command, final String key, @Nullable final Boolean value) {
+        if (value != null) {
+            command.put(key, new BsonBoolean(value));
+        }
+    }
+
     static void putIfNotZero(final BsonDocument command, final String key, final int value) {
         if (value != 0) {
             command.put(key, new BsonInt32(value));
diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java
index 6ddc087bdee..d879f83e542 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.MongoCommandException;
 import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.WriteConcern;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadWriteBinding;
@@ -66,10 +67,6 @@ public class DropCollectionOperation implements AsyncWriteOperation<Void>, Write
     private BsonDocument encryptedFields;
     private boolean autoEncryptedFields;
 
-    public DropCollectionOperation(final MongoNamespace namespace) {
-        this(namespace, null);
-    }
-
     public DropCollectionOperation(final MongoNamespace namespace, @Nullable final WriteConcern writeConcern) {
         this.namespace = notNull("namespace", namespace);
         this.writeConcern = writeConcern;
@@ -96,7 +93,7 @@ public Void execute(final WriteBinding binding) {
             getCommands(localEncryptedFields).forEach(command -> {
                 try {
                     executeCommand(binding, namespace.getDatabaseName(), command.get(),
-                            connection, writeConcernErrorTransformer());
+                            connection, writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext()));
                 } catch (MongoCommandException e) {
                     rethrowIfNotNamespaceError(e);
                 }
@@ -251,8 +248,12 @@ public void onResult(@Nullable final Void result, @Nullable final Throwable t) {
             if (nextCommandFunction == null) {
                 finalCallback.onResult(null, null);
             } else {
-                executeCommandAsync(binding, namespace.getDatabaseName(), nextCommandFunction.get(),
-                        connection, writeConcernErrorTransformerAsync(), this);
+                try {
+                    executeCommandAsync(binding, namespace.getDatabaseName(), nextCommandFunction.get(),
+                            connection, writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), this);
+                } catch (MongoOperationTimeoutException operationTimeoutException) {
+                    finalCallback.onResult(null, operationTimeoutException);
+                }
             }
         }
     }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java
index 2dad7dda177..9dd942cb726 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java
@@ -46,10 +46,6 @@ public class DropDatabaseOperation implements AsyncWriteOperation<Void>, WriteOp
     private final String databaseName;
     private final WriteConcern writeConcern;
 
-    public DropDatabaseOperation(final String databaseName) {
-        this(databaseName, null);
-    }
-
     public DropDatabaseOperation(final String databaseName, @Nullable final WriteConcern writeConcern) {
         this.databaseName = notNull("databaseName", databaseName);
         this.writeConcern = writeConcern;
@@ -62,7 +58,8 @@ public WriteConcern getWriteConcern() {
     @Override
     public Void execute(final WriteBinding binding) {
         return withConnection(binding, connection -> {
-            executeCommand(binding, databaseName, getCommand(), connection, writeConcernErrorTransformer());
+            executeCommand(binding, databaseName, getCommand(), connection, writeConcernErrorTransformer(binding.getOperationContext()
+                    .getTimeoutContext()));
             return null;
         });
     }
@@ -75,7 +72,8 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall
                 errHandlingCallback.onResult(null, t);
             } else {
                 executeCommandAsync(binding, databaseName, getCommand(), connection,
-                        writeConcernErrorTransformerAsync(), releasingCallback(errHandlingCallback, connection));
+                        writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()),
+                        releasingCallback(errHandlingCallback, connection));
 
             }
         });
diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java
index 66bb8f408fb..e66a4e10bbf 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java
@@ -26,21 +26,12 @@
 import org.bson.BsonDocument;
 import org.bson.BsonString;
 
-import java.util.concurrent.TimeUnit;
-
-import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
-import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync;
-import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback;
-import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection;
 import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync;
 import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError;
 import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError;
-import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero;
-import static com.mongodb.internal.operation.OperationHelper.LOGGER;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand;
-import static com.mongodb.internal.operation.SyncOperationHelper.withConnection;
 import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer;
 import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand;
 
@@ -54,15 +45,6 @@ public class DropIndexOperation implements AsyncWriteOperation<Void>, WriteOpera
     private final String indexName;
     private final BsonDocument indexKeys;
     private final WriteConcern writeConcern;
-    private long maxTimeMS;
-
-    public DropIndexOperation(final MongoNamespace namespace, final String indexName) {
-        this(namespace, indexName, null);
-    }
-
-    public DropIndexOperation(final MongoNamespace namespace, final BsonDocument keys) {
-        this(namespace, keys, null);
-    }
 
     public DropIndexOperation(final MongoNamespace namespace, final String indexName, @Nullable final WriteConcern writeConcern) {
         this.namespace = notNull("namespace", namespace);
@@ -82,61 +64,40 @@ public WriteConcern getWriteConcern() {
         return writeConcern;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public DropIndexOperation maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        isTrueArgument("maxTime >= 0", maxTime >= 0);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     @Override
     public Void execute(final WriteBinding binding) {
-        return withConnection(binding, connection -> {
-            try {
-                executeCommand(binding, namespace.getDatabaseName(), getCommand(), connection,
-                        writeConcernErrorTransformer());
-            } catch (MongoCommandException e) {
-                rethrowIfNotNamespaceError(e);
-            }
-            return null;
-        });
+        try {
+            executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformer(binding
+                    .getOperationContext()
+                    .getTimeoutContext()));
+        } catch (MongoCommandException e) {
+            rethrowIfNotNamespaceError(e);
+        }
+        return null;
     }
 
     @Override
     public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback<Void> callback) {
-        withAsyncConnection(binding, (connection, t) -> {
-            SingleResultCallback<Void> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
-            if (t != null) {
-                errHandlingCallback.onResult(null, t);
+        executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(),
+                writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), (result, t) -> {
+            if (t != null && !isNamespaceError(t)) {
+                callback.onResult(null, t);
             } else {
-                SingleResultCallback<Void> releasingCallback = releasingCallback(errHandlingCallback, connection);
-                executeCommandAsync(binding, namespace.getDatabaseName(), getCommand(),
-                        connection, writeConcernErrorTransformerAsync(), (result, t1) -> {
-                            if (t1 != null && !isNamespaceError(t1)) {
-                                releasingCallback.onResult(null, t1);
-                            } else {
-                                releasingCallback.onResult(result, null);
-                            }
-                        });
+                callback.onResult(null, null);
             }
         });
     }
 
-    private BsonDocument getCommand() {
-        BsonDocument command = new BsonDocument("dropIndexes", new BsonString(namespace.getCollectionName()));
-        if (indexName != null) {
-            command.put("index", new BsonString(indexName));
-        } else {
-            command.put("index", indexKeys);
-        }
-
-        putIfNotZero(command, "maxTimeMS", maxTimeMS);
-        appendWriteConcernToCommand(writeConcern, command);
-        return command;
+    private CommandOperationHelper.CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument command = new BsonDocument("dropIndexes", new BsonString(namespace.getCollectionName()));
+            if (indexName != null) {
+                command.put("index", new BsonString(indexName));
+            } else {
+                command.put("index", indexKeys);
+            }
+            appendWriteConcernToCommand(writeConcern, command);
+            return command;
+        };
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java
index 571de884582..17f7e617405 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java
@@ -22,7 +22,6 @@
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonString;
@@ -30,8 +29,6 @@
 import org.bson.codecs.BsonDocumentCodec;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
-
 import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync;
@@ -39,7 +36,6 @@
 import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
 import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError;
 import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError;
-import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero;
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand;
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead;
@@ -52,7 +48,6 @@ public class EstimatedDocumentCountOperation implements AsyncReadOperation<Long>
     private static final Decoder<BsonDocument> DECODER = new BsonDocumentCodec();
     private final MongoNamespace namespace;
     private boolean retryReads;
-    private long maxTimeMS;
     private BsonValue comment;
 
     public EstimatedDocumentCountOperation(final MongoNamespace namespace) {
@@ -64,12 +59,6 @@ public EstimatedDocumentCountOperation retryReads(final boolean retryReads) {
         return this;
     }
 
-    public EstimatedDocumentCountOperation maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     @Nullable
     public BsonValue getComment() {
         return comment;
@@ -83,8 +72,9 @@ public EstimatedDocumentCountOperation comment(@Nullable final BsonValue comment
     @Override
     public Long execute(final ReadBinding binding) {
         try {
-            return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
-                    CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), transformer(), retryReads);
+            return executeRetryableRead(binding, namespace.getDatabaseName(),
+                                        getCommandCreator(), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")),
+                                        transformer(), retryReads);
         } catch (MongoCommandException e) {
             return assertNotNull(rethrowIfNotNamespaceError(e, 0L));
         }
@@ -92,9 +82,10 @@ public Long execute(final ReadBinding binding) {
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<Long> callback) {
-        executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
-                CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), asyncTransformer(), retryReads,
-                (result, t) -> {
+        executeRetryableReadAsync(binding, namespace.getDatabaseName(),
+                                  getCommandCreator(), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")),
+                                  asyncTransformer(), retryReads,
+                                  (result, t) -> {
                     if (isNamespaceError(t)) {
                         callback.onResult(0L, null);
                     } else {
@@ -115,11 +106,10 @@ private long transformResult(final BsonDocument result, final ConnectionDescript
         return (result.getNumber("n")).longValue();
     }
 
-    private CommandCreator getCommandCreator(final SessionContext sessionContext) {
-        return (serverDescription, connectionDescription) -> {
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
             BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName()));
-            appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), document);
-            putIfNotZero(document, "maxTimeMS", maxTimeMS);
+            appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), document);
             if (comment != null) {
                 document.put("comment", comment);
             }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java
index 928173ba2fb..c284b942fe4 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java
@@ -28,8 +28,6 @@
 import org.bson.FieldNameValidator;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
-
 /**
  * An operation that atomically finds and deletes a single document.
  *
@@ -38,7 +36,7 @@
 public class FindAndDeleteOperation<T> extends BaseFindAndModifyOperation<T> {
 
     public FindAndDeleteOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites,
-                                  final Decoder<T> decoder) {
+            final Decoder<T> decoder) {
         super(namespace, writeConcern, retryWrites, decoder);
     }
 
@@ -54,12 +52,6 @@ public FindAndDeleteOperation<T> projection(@Nullable final BsonDocument project
         return this;
     }
 
-    @Override
-    public FindAndDeleteOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        super.maxTime(maxTime, timeUnit);
-        return this;
-    }
-
     @Override
     public FindAndDeleteOperation<T> sort(@Nullable final BsonDocument sort) {
         super.sort(sort);
diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java
index 303d9c0e208..3c143fdde36 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java
@@ -32,7 +32,6 @@
 
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.operation.DocumentHelper.putIfTrue;
@@ -49,7 +48,7 @@ public class FindAndReplaceOperation<T> extends BaseFindAndModifyOperation<T> {
     private Boolean bypassDocumentValidation;
 
     public FindAndReplaceOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites,
-                                   final Decoder<T> decoder, final BsonDocument replacement) {
+            final Decoder<T> decoder, final BsonDocument replacement) {
         super(namespace, writeConcern, retryWrites, decoder);
         this.replacement = notNull("replacement", replacement);
     }
@@ -97,12 +96,6 @@ public FindAndReplaceOperation<T> projection(@Nullable final BsonDocument projec
         return this;
     }
 
-    @Override
-    public FindAndReplaceOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        super.maxTime(maxTime, timeUnit);
-        return this;
-    }
-
     @Override
     public FindAndReplaceOperation<T> sort(@Nullable final BsonDocument sort) {
         super.sort(sort);
diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java
index 2c2a00ff437..46e1994985c 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java
@@ -34,7 +34,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull;
@@ -53,15 +52,15 @@ public class FindAndUpdateOperation<T> extends BaseFindAndModifyOperation<T> {
     private Boolean bypassDocumentValidation;
     private List<BsonDocument> arrayFilters;
 
-    public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites,
-                                  final Decoder<T> decoder, final BsonDocument update) {
+    public FindAndUpdateOperation(final MongoNamespace namespace,
+            final WriteConcern writeConcern, final boolean retryWrites, final Decoder<T> decoder, final BsonDocument update) {
         super(namespace, writeConcern, retryWrites, decoder);
         this.update = notNull("update", update);
         this.updatePipeline = null;
     }
 
     public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites,
-                                  final Decoder<T> decoder, final List<BsonDocument> update) {
+            final Decoder<T> decoder, final List<BsonDocument> update) {
         super(namespace, writeConcern, retryWrites, decoder);
         this.updatePipeline = update;
         this.update = null;
@@ -125,12 +124,6 @@ public FindAndUpdateOperation<T> projection(@Nullable final BsonDocument project
         return this;
     }
 
-    @Override
-    public FindAndUpdateOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        super.maxTime(maxTime, timeUnit);
-        return this;
-    }
-
     @Override
     public FindAndUpdateOperation<T> sort(@Nullable final BsonDocument sort) {
         super.sort(sort);
diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java
index fa5aa9af1be..514e48b4db8 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java
@@ -21,6 +21,7 @@
 import com.mongodb.MongoCommandException;
 import com.mongodb.MongoNamespace;
 import com.mongodb.MongoQueryException;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
@@ -28,21 +29,17 @@
 import com.mongodb.internal.async.function.RetryState;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
-import com.mongodb.internal.connection.NoOpSessionContext;
-import com.mongodb.internal.session.SessionContext;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBoolean;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
-import org.bson.BsonInt64;
 import org.bson.BsonString;
 import org.bson.BsonValue;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
-import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync;
@@ -56,6 +53,7 @@
 import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand;
 import static com.mongodb.internal.operation.OperationHelper.LOGGER;
 import static com.mongodb.internal.operation.OperationHelper.canRetryRead;
+import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier;
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand;
 import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION;
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer;
@@ -78,8 +76,6 @@ public class FindOperation<T> implements AsyncExplainableReadOperation<AsyncBatc
     private int batchSize;
     private int limit;
     private BsonDocument projection;
-    private long maxTimeMS;
-    private long maxAwaitTimeMS;
     private int skip;
     private BsonDocument sort;
     private CursorType cursorType = CursorType.NonTailable;
@@ -94,6 +90,7 @@ public class FindOperation<T> implements AsyncExplainableReadOperation<AsyncBatc
     private boolean returnKey;
     private boolean showRecordId;
     private Boolean allowDiskUse;
+    private TimeoutMode timeoutMode;
 
     public FindOperation(final MongoNamespace namespace, final Decoder<T> decoder) {
         this.namespace = notNull("namespace", namespace);
@@ -144,30 +141,6 @@ public FindOperation<T> projection(@Nullable final BsonDocument projection) {
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public FindOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        isTrueArgument("maxTime >= 0", maxTime >= 0);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
-    public long getMaxAwaitTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public FindOperation<T> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        isTrueArgument("maxAwaitTime >= 0", maxAwaitTime >= 0);
-        this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit);
-        return this;
-    }
-
     public int getSkip() {
         return skip;
     }
@@ -195,6 +168,13 @@ public FindOperation<T> cursorType(final CursorType cursorType) {
         return this;
     }
 
+    public FindOperation<T> timeoutMode(@Nullable final TimeoutMode timeoutMode) {
+        if (timeoutMode != null) {
+            this.timeoutMode = timeoutMode;
+        }
+        return this;
+    }
+
     public boolean isNoCursorTimeout() {
         return noCursorTimeout;
     }
@@ -305,14 +285,19 @@ public FindOperation<T> allowDiskUse(@Nullable final Boolean allowDiskUse) {
 
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
-        RetryState retryState = initialRetryState(retryReads);
+        IllegalStateException invalidTimeoutModeException = invalidTimeoutModeException();
+        if (invalidTimeoutModeException != null) {
+            throw invalidTimeoutModeException;
+        }
+
+        RetryState retryState = initialRetryState(retryReads,  binding.getOperationContext().getTimeoutContext());
         Supplier<BatchCursor<T>> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () ->
             withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> {
-                retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext()));
+                retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext()));
                 try {
-                    return createReadCommandAndExecute(retryState, binding, source, namespace.getDatabaseName(),
-                            getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, FIRST_BATCH),
-                            transformer(), connection);
+                    return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, namespace.getDatabaseName(),
+                                                       getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIRST_BATCH),
+                                                       transformer(), connection);
                 } catch (MongoCommandException e) {
                     throw new MongoQueryException(e.getResponse(), e.getServerAddress());
                 }
@@ -321,22 +306,28 @@ public BatchCursor<T> execute(final ReadBinding binding) {
         return read.get();
     }
 
-
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) {
-        RetryState retryState = initialRetryState(retryReads);
+        IllegalStateException invalidTimeoutModeException = invalidTimeoutModeException();
+        if (invalidTimeoutModeException != null) {
+            callback.onResult(null, invalidTimeoutModeException);
+            return;
+        }
+
+        RetryState retryState = initialRetryState(retryReads,  binding.getOperationContext().getTimeoutContext());
         binding.retain();
         AsyncCallbackSupplier<AsyncBatchCursor<T>> asyncRead = decorateReadWithRetriesAsync(
                 retryState, binding.getOperationContext(), (AsyncCallbackSupplier<AsyncBatchCursor<T>>) funcCallback ->
                     withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback,
                             (source, connection, releasingCallback) -> {
                                 if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(),
-                                        binding.getSessionContext()), releasingCallback)) {
+                                        binding.getOperationContext()), releasingCallback)) {
                                     return;
                                 }
                                 SingleResultCallback<AsyncBatchCursor<T>> wrappedCallback = exceptionTransformingCallback(releasingCallback);
-                                createReadCommandAndExecuteAsync(retryState, binding, source, namespace.getDatabaseName(),
-                                        getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, FIRST_BATCH),
+                                createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source,
+                                        namespace.getDatabaseName(), getCommandCreator(),
+                                        CommandResultDocumentCodec.create(decoder, FIRST_BATCH),
                                         asyncTransformer(), connection, wrappedCallback);
                             })
                 ).whenComplete(binding::release);
@@ -362,23 +353,25 @@ private static <T> SingleResultCallback<T> exceptionTransformingCallback(final S
     @Override
     public <R> ReadOperation<R> asExplainableOperation(@Nullable final ExplainVerbosity verbosity,
                                                        final Decoder<R> resultDecoder) {
-        return new CommandReadOperation<>(getNamespace().getDatabaseName(),
-                asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity),
-                resultDecoder);
+        return createExplainableOperation(verbosity, resultDecoder);
     }
 
     @Override
     public <R> AsyncReadOperation<R> asAsyncExplainableOperation(@Nullable final ExplainVerbosity verbosity,
                                                                  final Decoder<R> resultDecoder) {
+        return createExplainableOperation(verbosity, resultDecoder);
+    }
+
+    <R> CommandReadOperation<R> createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder<R> resultDecoder) {
         return new CommandReadOperation<>(getNamespace().getDatabaseName(),
-                asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity),
-                resultDecoder);
+                (operationContext, serverDescription, connectionDescription) ->
+                        asExplainCommand(getCommand(operationContext, MIN_WIRE_VERSION), verbosity), resultDecoder);
     }
 
-    private BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) {
+    private BsonDocument getCommand(final OperationContext operationContext, final int maxWireVersion) {
         BsonDocument commandDocument = new BsonDocument("find", new BsonString(namespace.getCollectionName()));
 
-        appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument);
+        appendReadConcernToCommand(operationContext.getSessionContext(), maxWireVersion, commandDocument);
 
         putIfNotNull(commandDocument, "filter", filter);
         putIfNotNullOrEmpty(commandDocument, "sort", sort);
@@ -399,15 +392,17 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m
         if (limit < 0 || batchSize < 0) {
             commandDocument.put("singleBatch", BsonBoolean.TRUE);
         }
-        if (maxTimeMS > 0) {
-            commandDocument.put("maxTimeMS", new BsonInt64(maxTimeMS));
-        }
         if (isTailableCursor()) {
             commandDocument.put("tailable", BsonBoolean.TRUE);
+            if (isAwaitData()) {
+                commandDocument.put("awaitData", BsonBoolean.TRUE);
+            } else {
+                operationContext.getTimeoutContext().setMaxTimeOverride(0L);
+            }
+        } else {
+            setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext);
         }
-        if (isAwaitData()) {
-            commandDocument.put("awaitData", BsonBoolean.TRUE);
-        }
+
         if (noCursorTimeout) {
             commandDocument.put("noCursorTimeout", BsonBoolean.TRUE);
         }
@@ -444,8 +439,9 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m
         return commandDocument;
     }
 
-    private CommandCreator getCommandCreator(final SessionContext sessionContext) {
-        return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion());
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) ->
+                getCommand(operationContext, connectionDescription.getMaxWireVersion());
     }
 
     private boolean isTailableCursor() {
@@ -456,17 +452,36 @@ private boolean isAwaitData() {
         return cursorType == CursorType.TailableAwait;
     }
 
-    private CommandReadTransformer<BsonDocument, CommandBatchCursor<T>> transformer() {
-        return (result, source, connection) ->
-                new CommandBatchCursor<>(result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection);
+    private TimeoutMode getTimeoutMode() {
+        if (timeoutMode == null) {
+            return isTailableCursor() ? TimeoutMode.ITERATION : TimeoutMode.CURSOR_LIFETIME;
+        }
+        return timeoutMode;
     }
 
-    private long getMaxTimeForCursor() {
-        return cursorType == CursorType.TailableAwait ? maxAwaitTimeMS : 0;
+    private CommandReadTransformer<BsonDocument, CommandBatchCursor<T>> transformer() {
+        return (result, source, connection) ->
+                new CommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder,
+                        comment, source, connection);
     }
 
     private CommandReadTransformerAsync<BsonDocument, AsyncBatchCursor<T>> asyncTransformer() {
         return (result, source, connection) ->
-            new AsyncCommandBatchCursor<>(result, batchSize, getMaxTimeForCursor(), decoder, comment, source, connection);
+            new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder,
+                    comment, source, connection);
+    }
+
+    private long getMaxTimeForCursor(final OperationContext operationContext) {
+        return cursorType == CursorType.TailableAwait ? operationContext.getTimeoutContext().getMaxAwaitTimeMS() : 0;
+    }
+
+    @Nullable
+    private IllegalStateException invalidTimeoutModeException() {
+        if (isTailableCursor()) {
+            if (timeoutMode == TimeoutMode.CURSOR_LIFETIME) {
+                return new IllegalStateException("Tailable cursors only support the ITERATION value for the timeoutMode option.");
+            }
+        }
+        return null;
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java
index 5883d68ae18..73abe905aea 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java
@@ -17,7 +17,7 @@
 package com.mongodb.internal.operation;
 
 import com.mongodb.MongoCommandException;
-import com.mongodb.MongoNamespace;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
@@ -26,15 +26,12 @@
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.lang.Nullable;
-import org.bson.BsonBoolean;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
-import org.bson.BsonInt64;
 import org.bson.BsonValue;
 import org.bson.codecs.Codec;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import static com.mongodb.assertions.Assertions.notNull;
@@ -46,6 +43,7 @@
 import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync;
 import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection;
 import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor;
+import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
 import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState;
 import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError;
 import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError;
@@ -54,6 +52,7 @@
 import static com.mongodb.internal.operation.DocumentHelper.putIfTrue;
 import static com.mongodb.internal.operation.OperationHelper.LOGGER;
 import static com.mongodb.internal.operation.OperationHelper.canRetryRead;
+import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier;
 import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor;
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer;
 import static com.mongodb.internal.operation.SyncOperationHelper.createReadCommandAndExecute;
@@ -76,10 +75,10 @@ public class ListCollectionsOperation<T> implements AsyncReadOperation<AsyncBatc
     private boolean retryReads;
     private BsonDocument filter;
     private int batchSize;
-    private long maxTimeMS;
     private boolean nameOnly;
     private boolean authorizedCollections;
     private BsonValue comment;
+    private TimeoutMode timeoutMode = TimeoutMode.CURSOR_LIFETIME;
 
     public ListCollectionsOperation(final String databaseName, final Decoder<T> decoder) {
         this.databaseName = notNull("databaseName", databaseName);
@@ -113,17 +112,6 @@ public ListCollectionsOperation<T> batchSize(final int batchSize) {
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public ListCollectionsOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public ListCollectionsOperation<T> retryReads(final boolean retryReads) {
         this.retryReads = retryReads;
         return this;
@@ -157,15 +145,27 @@ public boolean isAuthorizedCollections() {
         return authorizedCollections;
     }
 
+
+    public TimeoutMode getTimeoutMode() {
+        return timeoutMode;
+    }
+
+    public ListCollectionsOperation<T> timeoutMode(@Nullable final TimeoutMode timeoutMode) {
+        if (timeoutMode != null) {
+            this.timeoutMode = timeoutMode;
+        }
+        return this;
+    }
+
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
-        RetryState retryState = initialRetryState(retryReads);
+        RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext());
         Supplier<BatchCursor<T>> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () ->
             withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> {
-                retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext()));
+                retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext()));
                 try {
-                    return createReadCommandAndExecute(retryState, binding, source, databaseName, getCommandCreator(),
-                            createCommandDecoder(), commandTransformer(), connection);
+                    return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, databaseName,
+                                                       getCommandCreator(), createCommandDecoder(), transformer(), connection);
                 } catch (MongoCommandException e) {
                     return rethrowIfNotNamespaceError(e,
                             createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize));
@@ -177,18 +177,19 @@ public BatchCursor<T> execute(final ReadBinding binding) {
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) {
-        RetryState retryState = initialRetryState(retryReads);
+        RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext());
         binding.retain();
         AsyncCallbackSupplier<AsyncBatchCursor<T>> asyncRead = decorateReadWithRetriesAsync(
                 retryState, binding.getOperationContext(), (AsyncCallbackSupplier<AsyncBatchCursor<T>>) funcCallback ->
                     withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback,
                             (source, connection, releasingCallback) -> {
                                 if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(),
-                                        binding.getSessionContext()), releasingCallback)) {
+                                        binding.getOperationContext()), releasingCallback)) {
                                     return;
                                 }
-                                createReadCommandAndExecuteAsync(retryState, binding, source, databaseName, getCommandCreator(), createCommandDecoder(),
-                                        asyncTransformer(), connection, (result, t) -> {
+                                createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source, databaseName,
+                                                                 getCommandCreator(), createCommandDecoder(), asyncTransformer(), connection,
+                                        (result, t) -> {
                                             if (t != null && !isNamespaceError(t)) {
                                                 releasingCallback.onResult(null, t);
                                             } else {
@@ -201,37 +202,28 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb
         asyncRead.get(errorHandlingCallback(callback, LOGGER));
     }
 
-    private MongoNamespace createNamespace() {
-        return new MongoNamespace(databaseName, "$cmd.listCollections");
+    private CommandReadTransformer<BsonDocument, BatchCursor<T>> transformer() {
+        return (result, source, connection) ->
+                cursorDocumentToBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection);
     }
 
     private CommandReadTransformerAsync<BsonDocument, AsyncBatchCursor<T>> asyncTransformer() {
-        return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result, decoder, comment, source, connection, batchSize);
-    }
-
-    private CommandReadTransformer<BsonDocument, BatchCursor<T>> commandTransformer() {
-        return (result, source, connection) -> cursorDocumentToBatchCursor(result, decoder, comment, source, connection, batchSize);
-    }
-
-    private CommandOperationHelper.CommandCreator getCommandCreator() {
-        return (serverDescription, connectionDescription) -> getCommand();
-    }
-
-    private BsonDocument getCommand() {
-        BsonDocument command = new BsonDocument("listCollections", new BsonInt32(1))
-                .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize));
-        if (filter != null) {
-            command.append("filter", filter);
-        }
-        if (nameOnly) {
-            command.append("nameOnly", BsonBoolean.TRUE);
-        }
-        putIfTrue(command, "authorizedCollections", authorizedCollections);
-        if (maxTimeMS > 0) {
-            command.put("maxTimeMS", new BsonInt64(maxTimeMS));
-        }
-        putIfNotNull(command, "comment", comment);
-        return command;
+        return (result, source, connection) ->
+                cursorDocumentToAsyncBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection);
+    }
+
+
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument commandDocument = new BsonDocument("listCollections", new BsonInt32(1))
+                    .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize));
+            putIfNotNull(commandDocument, "filter", filter);
+            putIfTrue(commandDocument, "nameOnly", nameOnly);
+            putIfTrue(commandDocument, "authorizedCollections", authorizedCollections);
+            setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext);
+            putIfNotNull(commandDocument, "comment", comment);
+            return commandDocument;
+        };
     }
 
     private Codec<BsonDocument> createCommandDecoder() {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java
index fec689c938f..5f61c9192dd 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java
@@ -16,21 +16,16 @@
 
 package com.mongodb.internal.operation;
 
-
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.lang.Nullable;
-import org.bson.BsonBoolean;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
-import org.bson.BsonInt64;
 import org.bson.BsonValue;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
-
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.operation.AsyncOperationHelper.asyncSingleBatchCursorTransformer;
@@ -48,13 +43,9 @@
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public class ListDatabasesOperation<T> implements AsyncReadOperation<AsyncBatchCursor<T>>, ReadOperation<BatchCursor<T>> {
-
     private static final String DATABASES = "databases";
-
     private final Decoder<T> decoder;
     private boolean retryReads;
-
-    private long maxTimeMS;
     private BsonDocument filter;
     private Boolean nameOnly;
     private Boolean authorizedDatabasesOnly;
@@ -64,17 +55,6 @@ public ListDatabasesOperation(final Decoder<T> decoder) {
         this.decoder = notNull("decoder", decoder);
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public ListDatabasesOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public ListDatabasesOperation<T> filter(@Nullable final BsonDocument filter) {
         this.filter = filter;
         return this;
@@ -123,38 +103,24 @@ public ListDatabasesOperation<T> comment(@Nullable final BsonValue comment) {
 
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
-        return executeRetryableRead(binding, "admin", getCommandCreator(),
-                CommandResultDocumentCodec.create(decoder, DATABASES),
+        return executeRetryableRead(binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, DATABASES),
                 singleBatchCursorTransformer(DATABASES), retryReads);
     }
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) {
-        executeRetryableReadAsync(binding, "admin", getCommandCreator(),
-                CommandResultDocumentCodec.create(decoder, DATABASES),
-                asyncSingleBatchCursorTransformer(DATABASES), retryReads,
-                errorHandlingCallback(callback, LOGGER));
+        executeRetryableReadAsync(binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, DATABASES),
+                asyncSingleBatchCursorTransformer(DATABASES), retryReads, errorHandlingCallback(callback, LOGGER));
     }
 
     private CommandCreator getCommandCreator() {
-        return (serverDescription, connectionDescription) -> getCommand();
-    }
-
-    private BsonDocument getCommand() {
-        BsonDocument command = new BsonDocument("listDatabases", new BsonInt32(1));
-        if (maxTimeMS > 0) {
-            command.put("maxTimeMS", new BsonInt64(maxTimeMS));
-        }
-        if (filter != null) {
-            command.put("filter", filter);
-        }
-        if (nameOnly != null) {
-            command.put("nameOnly", new BsonBoolean(nameOnly));
-        }
-        if (authorizedDatabasesOnly != null) {
-            command.put("authorizedDatabases", new BsonBoolean(authorizedDatabasesOnly));
-        }
-        putIfNotNull(command, "comment", comment);
-        return command;
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument commandDocument = new BsonDocument("listDatabases", new BsonInt32(1));
+            putIfNotNull(commandDocument, "filter", filter);
+            putIfNotNull(commandDocument, "nameOnly", nameOnly);
+            putIfNotNull(commandDocument, "authorizedDatabases", authorizedDatabasesOnly);
+            putIfNotNull(commandDocument, "comment", comment);
+            return commandDocument;
+        };
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java
index e4d0138121d..e540f752dbc 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.MongoCommandException;
 import com.mongodb.MongoNamespace;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.async.function.AsyncCallbackSupplier;
@@ -26,13 +27,11 @@
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
-import org.bson.BsonInt64;
 import org.bson.BsonString;
 import org.bson.BsonValue;
 import org.bson.codecs.Codec;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import static com.mongodb.assertions.Assertions.notNull;
@@ -50,6 +49,7 @@
 import static com.mongodb.internal.operation.CursorHelper.getCursorDocumentFromBatchSize;
 import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull;
 import static com.mongodb.internal.operation.OperationHelper.LOGGER;
+import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier;
 import static com.mongodb.internal.operation.OperationHelper.canRetryRead;
 import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor;
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer;
@@ -69,8 +69,8 @@ public class ListIndexesOperation<T> implements AsyncReadOperation<AsyncBatchCur
     private final Decoder<T> decoder;
     private boolean retryReads;
     private int batchSize;
-    private long maxTimeMS;
     private BsonValue comment;
+    private TimeoutMode timeoutMode = TimeoutMode.CURSOR_LIFETIME;
 
     public ListIndexesOperation(final MongoNamespace namespace, final Decoder<T> decoder) {
         this.namespace = notNull("namespace", namespace);
@@ -86,17 +86,6 @@ public ListIndexesOperation<T> batchSize(final int batchSize) {
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
-    }
-
-    public ListIndexesOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public ListIndexesOperation<T> retryReads(final boolean retryReads) {
         this.retryReads = retryReads;
         return this;
@@ -116,15 +105,26 @@ public ListIndexesOperation<T> comment(@Nullable final BsonValue comment) {
         return this;
     }
 
+    public TimeoutMode getTimeoutMode() {
+        return timeoutMode;
+    }
+
+    public ListIndexesOperation<T> timeoutMode(@Nullable final TimeoutMode timeoutMode) {
+        if (timeoutMode != null) {
+            this.timeoutMode = timeoutMode;
+        }
+        return this;
+    }
+
     @Override
     public BatchCursor<T> execute(final ReadBinding binding) {
-        RetryState retryState = initialRetryState(retryReads);
+        RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext());
         Supplier<BatchCursor<T>> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () ->
             withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> {
-                retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext()));
+                retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext()));
                 try {
-                    return createReadCommandAndExecute(retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(),
-                            createCommandDecoder(), transformer(), connection);
+                    return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, namespace.getDatabaseName(),
+                                                       getCommandCreator(), createCommandDecoder(), transformer(), connection);
                 } catch (MongoCommandException e) {
                     return rethrowIfNotNamespaceError(e,
                             createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize));
@@ -136,18 +136,20 @@ public BatchCursor<T> execute(final ReadBinding binding) {
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) {
-        RetryState retryState = initialRetryState(retryReads);
+        RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext());
         binding.retain();
         AsyncCallbackSupplier<AsyncBatchCursor<T>> asyncRead = decorateReadWithRetriesAsync(
                 retryState, binding.getOperationContext(), (AsyncCallbackSupplier<AsyncBatchCursor<T>>) funcCallback ->
                     withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback,
                             (source, connection, releasingCallback) -> {
                                 if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(),
-                                        binding.getSessionContext()), releasingCallback)) {
+                                        binding.getOperationContext()), releasingCallback)) {
                                     return;
                                 }
-                                createReadCommandAndExecuteAsync(retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(),
-                                        createCommandDecoder(), asyncTransformer(), connection, (result, t) -> {
+                                createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source,
+                                        namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(),
+                                        asyncTransformer(), connection,
+                                        (result, t) -> {
                                             if (t != null && !isNamespaceError(t)) {
                                                 releasingCallback.onResult(null, t);
                                             } else {
@@ -162,25 +164,23 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb
 
 
     private CommandCreator getCommandCreator() {
-        return (serverDescription, connectionDescription) -> getCommand();
-    }
-
-    private BsonDocument getCommand() {
-        BsonDocument command = new BsonDocument("listIndexes", new BsonString(namespace.getCollectionName()))
-                .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize));
-        if (maxTimeMS > 0) {
-            command.put("maxTimeMS", new BsonInt64(maxTimeMS));
-        }
-        putIfNotNull(command, "comment", comment);
-        return command;
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument commandDocument = new BsonDocument("listIndexes", new BsonString(namespace.getCollectionName()))
+                    .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize));
+            setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext);
+            putIfNotNull(commandDocument, "comment", comment);
+            return commandDocument;
+        };
     }
 
     private CommandReadTransformer<BsonDocument, BatchCursor<T>> transformer() {
-        return (result, source, connection) -> cursorDocumentToBatchCursor(result, decoder, comment, source, connection, batchSize);
+        return (result, source, connection) ->
+                cursorDocumentToBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection);
     }
 
     private CommandReadTransformerAsync<BsonDocument, AsyncBatchCursor<T>> asyncTransformer() {
-        return (result, source, connection) -> cursorDocumentToAsyncBatchCursor(result, decoder, comment, source, connection, batchSize);
+        return (result, source, connection) ->
+                cursorDocumentToAsyncBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection);
     }
 
     private Codec<BsonDocument> createCommandDecoder() {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java
index 74313059099..0f9a81dbf19 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java
@@ -31,12 +31,11 @@
 import org.bson.BsonValue;
 import org.bson.codecs.Decoder;
 
-import java.util.Collections;
-import java.util.concurrent.TimeUnit;
-
 import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor;
 import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError;
 import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor;
+import static java.util.Collections.singletonList;
+
 
 /**
  * An operation that lists Atlas Search indexes with the help of {@value #STAGE_LIST_SEARCH_INDEXES} pipeline stage.
@@ -56,26 +55,18 @@ final class ListSearchIndexesOperation<T>
     private final Collation collation;
     @Nullable
     private final BsonValue comment;
-    private final long maxTimeMS;
     @Nullable
     private final String indexName;
     private final boolean retryReads;
 
-    ListSearchIndexesOperation(final MongoNamespace namespace,
-                               final Decoder<T> decoder,
-                               final long maxTimeMS,
-                               @Nullable final String indexName,
-                               @Nullable final Integer batchSize,
-                               @Nullable final Collation collation,
-                               @Nullable final BsonValue comment,
-                               @Nullable final Boolean allowDiskUse,
-                               final boolean retryReads) {
+    ListSearchIndexesOperation(final MongoNamespace namespace, final Decoder<T> decoder, @Nullable final String indexName,
+            @Nullable final Integer batchSize, @Nullable final Collation collation, @Nullable final BsonValue comment,
+            @Nullable final Boolean allowDiskUse, final boolean retryReads) {
         this.namespace = namespace;
         this.decoder = decoder;
         this.allowDiskUse = allowDiskUse;
         this.batchSize = batchSize;
         this.collation = collation;
-        this.maxTimeMS = maxTimeMS;
         this.comment = comment;
         this.indexName = indexName;
         this.retryReads = retryReads;
@@ -122,14 +113,12 @@ public <R> AsyncReadOperation<R> asAsyncExplainableOperation(@Nullable final Exp
     private AggregateOperation<T> asAggregateOperation() {
         BsonDocument searchDefinition = getSearchDefinition();
         BsonDocument listSearchIndexesStage = new BsonDocument(STAGE_LIST_SEARCH_INDEXES, searchDefinition);
-
-        return new AggregateOperation<>(namespace, Collections.singletonList(listSearchIndexesStage), decoder)
+        return new AggregateOperation<>(namespace, singletonList(listSearchIndexesStage), decoder)
                 .retryReads(retryReads)
                 .collation(collation)
                 .comment(comment)
                 .allowDiskUse(allowDiskUse)
-                .batchSize(batchSize)
-                .maxTime(maxTimeMS, TimeUnit.MILLISECONDS);
+                .batchSize(batchSize);
     }
 
     @NonNull
diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java
index 18546027c05..b93be56d6f2 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java
@@ -20,7 +20,7 @@
 import com.mongodb.MongoNamespace;
 import com.mongodb.WriteConcern;
 import com.mongodb.client.model.Collation;
-import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncWriteBinding;
 import com.mongodb.internal.binding.WriteBinding;
@@ -32,27 +32,21 @@
 import org.bson.codecs.BsonDocumentCodec;
 
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
-import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.assertions.Assertions.isTrue;
 import static com.mongodb.assertions.Assertions.notNull;
-import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.operation.AsyncOperationHelper.CommandWriteTransformerAsync;
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync;
-import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback;
-import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection;
+import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator;
 import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull;
 import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero;
 import static com.mongodb.internal.operation.DocumentHelper.putIfTrue;
-import static com.mongodb.internal.operation.OperationHelper.LOGGER;
+import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand;
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandWriteTransformer;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand;
-import static com.mongodb.internal.operation.SyncOperationHelper.withConnection;
 import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand;
 import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError;
 import static java.util.Arrays.asList;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * Operation that runs a Map Reduce against a MongoDB instance.  This operation does not support "inline" results, i.e. the results will
@@ -63,8 +57,7 @@
  *
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
-public class
-MapReduceToCollectionOperation implements AsyncWriteOperation<MapReduceStatistics>, WriteOperation<MapReduceStatistics> {
+public class MapReduceToCollectionOperation implements AsyncWriteOperation<MapReduceStatistics>, WriteOperation<MapReduceStatistics> {
     private final MongoNamespace namespace;
     private final BsonJavaScript mapFunction;
     private final BsonJavaScript reduceFunction;
@@ -77,7 +70,6 @@
     private int limit;
     private boolean jsMode;
     private boolean verbose;
-    private long maxTimeMS;
     private String action = "replace";
     private String databaseName;
     private Boolean bypassDocumentValidation;
@@ -85,13 +77,7 @@
     private static final List<String> VALID_ACTIONS = asList("replace", "merge", "reduce");
 
     public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction,
-                                          final BsonJavaScript reduceFunction, final String collectionName) {
-        this(namespace, mapFunction, reduceFunction, collectionName, null);
-    }
-
-    public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction,
-                                          final BsonJavaScript reduceFunction, @Nullable final String collectionName,
-                                          @Nullable final WriteConcern writeConcern) {
+            final BsonJavaScript reduceFunction, @Nullable final String collectionName, @Nullable final WriteConcern writeConcern) {
         this.namespace = notNull("namespace", namespace);
         this.mapFunction = notNull("mapFunction", mapFunction);
         this.reduceFunction = notNull("reduceFunction", reduceFunction);
@@ -182,17 +168,6 @@ public MapReduceToCollectionOperation verbose(final boolean verbose) {
         return this;
     }
 
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, MILLISECONDS);
-    }
-
-    public MapReduceToCollectionOperation maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
     public String getAction() {
         return action;
     }
@@ -234,23 +209,16 @@ public MapReduceToCollectionOperation collation(@Nullable final Collation collat
 
     @Override
     public MapReduceStatistics execute(final WriteBinding binding) {
-        return withConnection(binding, connection -> assertNotNull(executeCommand(binding, namespace.getDatabaseName(),
-                getCommand(connection.getDescription()), connection, transformer())));
+        return executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), transformer(binding
+                .getOperationContext()
+                .getTimeoutContext()));
     }
 
     @Override
     public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback<MapReduceStatistics> callback) {
-        withAsyncConnection(binding, (connection, t) -> {
-            SingleResultCallback<MapReduceStatistics> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
-            if (t != null) {
-                errHandlingCallback.onResult(null, t);
-            } else {
-                executeCommandAsync(binding, namespace.getDatabaseName(),
-                        getCommand(connection.getDescription()), connection, transformerAsync(),
-                        releasingCallback(errHandlingCallback, connection));
-
-            }
-        });
+        executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), transformerAsync(binding
+                .getOperationContext()
+                .getTimeoutContext()), callback);
     }
 
     /**
@@ -274,54 +242,56 @@ public AsyncReadOperation<BsonDocument> asExplainableOperationAsync(final Explai
     }
 
     private CommandReadOperation<BsonDocument> createExplainableOperation(final ExplainVerbosity explainVerbosity) {
-        return new CommandReadOperation<>(namespace.getDatabaseName(),
-                ExplainHelper.asExplainCommand(getCommand(null), explainVerbosity),
-                new BsonDocumentCodec());
+        return new CommandReadOperation<>(getNamespace().getDatabaseName(),
+                (operationContext, serverDescription, connectionDescription) ->
+                        asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription),
+                                explainVerbosity), new BsonDocumentCodec());
     }
 
-    private CommandWriteTransformer<BsonDocument, MapReduceStatistics> transformer() {
+    private CommandWriteTransformer<BsonDocument, MapReduceStatistics> transformer(final TimeoutContext timeoutContext) {
         return (result, connection) -> {
             throwOnWriteConcernError(result, connection.getDescription().getServerAddress(),
-                    connection.getDescription().getMaxWireVersion());
+                    connection.getDescription().getMaxWireVersion(), timeoutContext);
             return MapReduceHelper.createStatistics(result);
         };
     }
 
-    private CommandWriteTransformerAsync<BsonDocument, MapReduceStatistics> transformerAsync() {
+    private CommandWriteTransformerAsync<BsonDocument, MapReduceStatistics> transformerAsync(final TimeoutContext timeoutContext) {
         return (result, connection) -> {
             throwOnWriteConcernError(result, connection.getDescription().getServerAddress(),
-                    connection.getDescription().getMaxWireVersion());
+                    connection.getDescription().getMaxWireVersion(), timeoutContext);
             return MapReduceHelper.createStatistics(result);
         };
     }
 
-    private BsonDocument getCommand(@Nullable final ConnectionDescription description) {
-        BsonDocument outputDocument = new BsonDocument(getAction(), new BsonString(getCollectionName()));
-        if (getDatabaseName() != null) {
-            outputDocument.put("db", new BsonString(getDatabaseName()));
-        }
-        BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName()))
-                                           .append("map", getMapFunction())
-                                           .append("reduce", getReduceFunction())
-                                           .append("out", outputDocument);
-
-        putIfNotNull(commandDocument, "query", getFilter());
-        putIfNotNull(commandDocument, "sort", getSort());
-        putIfNotNull(commandDocument, "finalize", getFinalizeFunction());
-        putIfNotNull(commandDocument, "scope", getScope());
-        putIfTrue(commandDocument, "verbose", isVerbose());
-        putIfNotZero(commandDocument, "limit", getLimit());
-        putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS));
-        putIfTrue(commandDocument, "jsMode", isJsMode());
-        if (bypassDocumentValidation != null && description != null) {
-            commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation));
-        }
-        if (description != null) {
+
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+            BsonDocument outputDocument = new BsonDocument(getAction(), new BsonString(getCollectionName()));
+            if (getDatabaseName() != null) {
+                outputDocument.put("db", new BsonString(getDatabaseName()));
+            }
+            BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName()))
+                    .append("map", getMapFunction())
+                    .append("reduce", getReduceFunction())
+                    .append("out", outputDocument);
+
+            putIfNotNull(commandDocument, "query", getFilter());
+            putIfNotNull(commandDocument, "sort", getSort());
+            putIfNotNull(commandDocument, "finalize", getFinalizeFunction());
+            putIfNotNull(commandDocument, "scope", getScope());
+            putIfTrue(commandDocument, "verbose", isVerbose());
+            putIfNotZero(commandDocument, "limit", getLimit());
+            putIfTrue(commandDocument, "jsMode", isJsMode());
+            if (bypassDocumentValidation != null) {
+                commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation));
+            }
             appendWriteConcernToCommand(writeConcern, commandDocument);
-        }
-        if (collation != null) {
-            commandDocument.put("collation", collation.asDocument());
-        }
-        return commandDocument;
+            if (collation != null) {
+                commandDocument.put("collation", collation.asDocument());
+            }
+            return commandDocument;
+        };
     }
+
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java
index ff10df61f0e..695053e8845 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java
@@ -22,8 +22,6 @@
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.ReadBinding;
-import com.mongodb.internal.connection.NoOpSessionContext;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
@@ -32,8 +30,6 @@
 import org.bson.codecs.BsonDocumentCodec;
 import org.bson.codecs.Decoder;
 
-import java.util.concurrent.TimeUnit;
-
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback;
 import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync;
@@ -45,10 +41,8 @@
 import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand;
 import static com.mongodb.internal.operation.OperationHelper.LOGGER;
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand;
-import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION;
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer;
 import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>Operation that runs a Map Reduce against a MongoDB instance.  This operation only supports "inline" results, i.e. the results will be
@@ -71,11 +65,10 @@ public class MapReduceWithInlineResultsOperation<T> implements AsyncReadOperatio
     private int limit;
     private boolean jsMode;
     private boolean verbose;
-    private long maxTimeMS;
     private Collation collation;
 
     public MapReduceWithInlineResultsOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction,
-                                               final BsonJavaScript reduceFunction, final Decoder<T> decoder) {
+            final BsonJavaScript reduceFunction, final Decoder<T> decoder) {
         this.namespace = notNull("namespace", namespace);
         this.mapFunction = notNull("mapFunction", mapFunction);
         this.reduceFunction = notNull("reduceFunction", reduceFunction);
@@ -170,31 +163,18 @@ public MapReduceWithInlineResultsOperation<T> collation(@Nullable final Collatio
         return this;
     }
 
-
-    public long getMaxTime(final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        return timeUnit.convert(maxTimeMS, MILLISECONDS);
-    }
-
-
-    public MapReduceWithInlineResultsOperation<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit);
-        return this;
-    }
-
-
     @Override
     public MapReduceBatchCursor<T> execute(final ReadBinding binding) {
-        return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
+        return executeRetryableRead(binding, namespace.getDatabaseName(),
+                getCommandCreator(),
                 CommandResultDocumentCodec.create(decoder, "results"), transformer(), false);
     }
 
     @Override
     public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<MapReduceAsyncBatchCursor<T>> callback) {
         SingleResultCallback<MapReduceAsyncBatchCursor<T>> errHandlingCallback = errorHandlingCallback(callback, LOGGER);
-        executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()),
-                CommandResultDocumentCodec.create(decoder, "results"),
+        executeRetryableReadAsync(binding, namespace.getDatabaseName(),
+                getCommandCreator(), CommandResultDocumentCodec.create(decoder, "results"),
                 asyncTransformer(), false, errHandlingCallback);
     }
 
@@ -208,7 +188,8 @@ public AsyncReadOperation<BsonDocument> asExplainableOperationAsync(final Explai
 
     private CommandReadOperation<BsonDocument> createExplainableOperation(final ExplainVerbosity explainVerbosity) {
         return new CommandReadOperation<>(namespace.getDatabaseName(),
-                asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION),
+                (operationContext, serverDescription, connectionDescription) ->
+                        asExplainCommand(getCommandCreator().create(operationContext, serverDescription, connectionDescription),
                         explainVerbosity), new BsonDocumentCodec());
     }
 
@@ -226,28 +207,26 @@ private CommandReadTransformerAsync<BsonDocument, MapReduceAsyncBatchCursor<T>>
                 MapReduceHelper.createStatistics(result));
     }
 
-    private CommandCreator getCommandCreator(final SessionContext sessionContext) {
-        return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion());
-    }
-
-    private BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) {
-        BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName()))
-                                           .append("map", getMapFunction())
-                                           .append("reduce", getReduceFunction())
-                                           .append("out", new BsonDocument("inline", new BsonInt32(1)));
-
-        putIfNotNull(commandDocument, "query", getFilter());
-        putIfNotNull(commandDocument, "sort", getSort());
-        putIfNotNull(commandDocument, "finalize", getFinalizeFunction());
-        putIfNotNull(commandDocument, "scope", getScope());
-        putIfTrue(commandDocument, "verbose", isVerbose());
-        appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument);
-        putIfNotZero(commandDocument, "limit", getLimit());
-        putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS));
-        putIfTrue(commandDocument, "jsMode", isJsMode());
-        if (collation != null) {
-            commandDocument.put("collation", collation.asDocument());
-        }
-        return commandDocument;
+    private CommandCreator getCommandCreator() {
+        return (operationContext, serverDescription, connectionDescription) -> {
+
+            BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName()))
+                    .append("map", getMapFunction())
+                    .append("reduce", getReduceFunction())
+                    .append("out", new BsonDocument("inline", new BsonInt32(1)));
+
+            putIfNotNull(commandDocument, "query", getFilter());
+            putIfNotNull(commandDocument, "sort", getSort());
+            putIfNotNull(commandDocument, "finalize", getFinalizeFunction());
+            putIfNotNull(commandDocument, "scope", getScope());
+            putIfTrue(commandDocument, "verbose", isVerbose());
+            appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), commandDocument);
+            putIfNotZero(commandDocument, "limit", getLimit());
+            putIfTrue(commandDocument, "jsMode", isJsMode());
+            if (collation != null) {
+                commandDocument.put("collation", collation.asDocument());
+            }
+            return commandDocument;
+        };
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java
index fe58fb0bd75..c506bbda2fe 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java
@@ -22,6 +22,7 @@
 import com.mongodb.assertions.Assertions;
 import com.mongodb.bulk.BulkWriteResult;
 import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.async.function.AsyncCallbackLoop;
 import com.mongodb.internal.async.function.AsyncCallbackRunnable;
@@ -87,10 +88,10 @@ public class MixedBulkWriteOperation implements AsyncWriteOperation<BulkWriteRes
     private BsonDocument variables;
 
     public MixedBulkWriteOperation(final MongoNamespace namespace, final List<? extends WriteRequest> writeRequests,
-                                   final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites) {
-        this.ordered = ordered;
+            final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites) {
         this.namespace = notNull("namespace", namespace);
         this.writeRequests = notNull("writes", writeRequests);
+        this.ordered = ordered;
         this.writeConcern = notNull("writeConcern", writeConcern);
         this.retryWrites = retryWrites;
         isTrueArgument("writes is not an empty list", !writeRequests.isEmpty());
@@ -176,6 +177,7 @@ private boolean shouldAttemptToRetryWrite(final RetryState retryState, final Thr
 
     @Override
     public BulkWriteResult execute(final WriteBinding binding) {
+        TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext();
         /* We cannot use the tracking of attempts built in the `RetryState` class because conceptually we have to maintain multiple attempt
          * counters while executing a single bulk write operation:
          * - a counter that limits attempts to select server and checkout a connection before we created a batch;
@@ -183,23 +185,23 @@ public BulkWriteResult execute(final WriteBinding binding) {
          * Fortunately, these counters do not exist concurrently with each other. While maintaining the counters manually,
          * we must adhere to the contract of `RetryingSyncSupplier`. When the retry timeout is implemented, there will be no counters,
          * and the code related to the attempt tracking in `BulkWriteTracker` will be removed. */
-        RetryState retryState = new RetryState();
-        BulkWriteTracker.attachNew(retryState, retryWrites);
+        RetryState retryState = new RetryState(timeoutContext);
+        BulkWriteTracker.attachNew(retryState, retryWrites, timeoutContext);
         Supplier<BulkWriteResult> retryingBulkWrite = decorateWriteWithRetries(retryState, binding.getOperationContext(), () ->
             withSourceAndConnection(binding::getWriteConnectionSource, true, (source, connection) -> {
                 ConnectionDescription connectionDescription = connection.getDescription();
                 // attach `maxWireVersion` ASAP because it is used to check whether we can retry
                 retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true);
-                SessionContext sessionContext = binding.getSessionContext();
+                SessionContext sessionContext = binding.getOperationContext().getSessionContext();
                 WriteConcern writeConcern = getAppliedWriteConcern(sessionContext);
                 if (!isRetryableWrite(retryWrites, getAppliedWriteConcern(sessionContext), connectionDescription, sessionContext)) {
-                    handleMongoWriteConcernWithResponseException(retryState, true);
+                    handleMongoWriteConcernWithResponseException(retryState, true, timeoutContext);
                 }
                 validateWriteRequests(connectionDescription, bypassDocumentValidation, writeRequests, writeConcern);
                 if (!retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail).batch().isPresent()) {
                     BulkWriteTracker.attachNew(retryState, BulkWriteBatch.createBulkWriteBatch(namespace,
                             connectionDescription, ordered, writeConcern,
-                            bypassDocumentValidation, retryWrites, writeRequests, sessionContext, comment, variables));
+                            bypassDocumentValidation, retryWrites, writeRequests, binding.getOperationContext(), comment, variables), timeoutContext);
                 }
                 return executeBulkWriteBatch(retryState, binding, connection);
             })
@@ -212,9 +214,10 @@ public BulkWriteResult execute(final WriteBinding binding) {
     }
 
     public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback<BulkWriteResult> callback) {
+        TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext();
         // see the comment in `execute(WriteBinding)` explaining the manual tracking of attempts
-        RetryState retryState = new RetryState();
-        BulkWriteTracker.attachNew(retryState, retryWrites);
+        RetryState retryState = new RetryState(timeoutContext);
+        BulkWriteTracker.attachNew(retryState, retryWrites, timeoutContext);
         binding.retain();
         AsyncCallbackSupplier<BulkWriteResult> retryingBulkWrite = this.<BulkWriteResult>decorateWriteWithRetries(retryState,
                 binding.getOperationContext(),
@@ -224,10 +227,10 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall
                 ConnectionDescription connectionDescription = connection.getDescription();
                 // attach `maxWireVersion` ASAP because it is used to check whether we can retry
                 retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true);
-                SessionContext sessionContext = binding.getSessionContext();
+                SessionContext sessionContext = binding.getOperationContext().getSessionContext();
                 WriteConcern writeConcern = getAppliedWriteConcern(sessionContext);
                 if (!isRetryableWrite(retryWrites, getAppliedWriteConcern(sessionContext), connectionDescription, sessionContext)
-                        && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallback)) {
+                        && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallback, timeoutContext)) {
                     return;
                 }
                 if (validateWriteRequestsAndCompleteIfInvalid(connectionDescription, bypassDocumentValidation, writeRequests,
@@ -238,7 +241,7 @@ && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallba
                     if (!retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail).batch().isPresent()) {
                         BulkWriteTracker.attachNew(retryState, BulkWriteBatch.createBulkWriteBatch(namespace,
                                 connectionDescription, ordered, writeConcern,
-                                bypassDocumentValidation, retryWrites, writeRequests, sessionContext, comment, variables));
+                                bypassDocumentValidation, retryWrites, writeRequests, binding.getOperationContext(), comment, variables), timeoutContext);
                     }
                 } catch (Throwable t) {
                     releasingCallback.onResult(null, t);
@@ -255,12 +258,15 @@ private BulkWriteResult executeBulkWriteBatch(final RetryState retryState, final
                 .orElseThrow(Assertions::fail);
         BulkWriteBatch currentBatch = currentBulkWriteTracker.batch().orElseThrow(Assertions::fail);
         int maxWireVersion = connection.getDescription().getMaxWireVersion();
+        OperationContext operationContext = binding.getOperationContext();
+        TimeoutContext timeoutContext = operationContext.getTimeoutContext();
+
         while (currentBatch.shouldProcessBatch()) {
             try {
-                BsonDocument result = executeCommand(connection, currentBatch, binding);
-                if (currentBatch.getRetryWrites() && !binding.getSessionContext().hasActiveTransaction()) {
+                BsonDocument result = executeCommand(operationContext, connection, currentBatch);
+                if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) {
                     MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result,
-                            connection.getDescription().getServerAddress(), "errMsg");
+                            connection.getDescription().getServerAddress(), "errMsg", timeoutContext);
                     if (writeConcernBasedError != null) {
                         if (currentBulkWriteTracker.lastAttempt()) {
                             addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion);
@@ -271,19 +277,21 @@ private BulkWriteResult executeBulkWriteBatch(final RetryState retryState, final
                     }
                 }
                 currentBatch.addResult(result);
-                currentBulkWriteTracker = BulkWriteTracker.attachNext(retryState, currentBatch);
+                currentBulkWriteTracker = BulkWriteTracker.attachNext(retryState, currentBatch, timeoutContext);
                 currentBatch = currentBulkWriteTracker.batch().orElseThrow(Assertions::fail);
             } catch (MongoException exception) {
                 if (!retryState.isFirstAttempt() && !(exception instanceof MongoWriteConcernWithResponseException)) {
                     addRetryableWriteErrorLabel(exception, maxWireVersion);
                 }
-                handleMongoWriteConcernWithResponseException(retryState, false);
+                handleMongoWriteConcernWithResponseException(retryState, false, timeoutContext);
                 throw exception;
             }
         }
         try {
             return currentBatch.getResult();
         } catch (MongoException e) {
+            /* if we get here, some of the batches failed on the server side,
+             * so we need to mark the last attempt to avoid retrying. */
             retryState.markAsLastAttempt();
             throw e;
         }
@@ -301,11 +309,13 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async
             if (loopState.breakAndCompleteIf(() -> !currentBatch.shouldProcessBatch(), iterationCallback)) {
                 return;
             }
-            executeCommandAsync(binding, connection, currentBatch, (result, t) -> {
+            OperationContext operationContext = binding.getOperationContext();
+            TimeoutContext timeoutContext = operationContext.getTimeoutContext();
+            executeCommandAsync(operationContext, connection, currentBatch, (result, t) -> {
                 if (t == null) {
-                    if (currentBatch.getRetryWrites() && !binding.getSessionContext().hasActiveTransaction()) {
+                    if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) {
                         MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result,
-                                connection.getDescription().getServerAddress(), "errMsg");
+                                connection.getDescription().getServerAddress(), "errMsg", binding.getOperationContext().getTimeoutContext());
                         if (writeConcernBasedError != null) {
                             if (currentBulkWriteTracker.lastAttempt()) {
                                 addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion);
@@ -319,7 +329,7 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async
                         }
                     }
                     currentBatch.addResult(result);
-                    BulkWriteTracker.attachNext(retryState, currentBatch);
+                    BulkWriteTracker.attachNext(retryState, currentBatch, timeoutContext);
                     iterationCallback.onResult(null, null);
                 } else {
                     if (t instanceof MongoException) {
@@ -327,7 +337,7 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async
                         if (!retryState.isFirstAttempt() && !(exception instanceof MongoWriteConcernWithResponseException)) {
                             addRetryableWriteErrorLabel(exception, maxWireVersion);
                         }
-                        if (handleMongoWriteConcernWithResponseExceptionAsync(retryState, null)) {
+                        if (handleMongoWriteConcernWithResponseExceptionAsync(retryState, null, timeoutContext)) {
                             return;
                         }
                     }
@@ -345,6 +355,8 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async
                             .flatMap(BulkWriteTracker::batch).orElseThrow(Assertions::fail).getResult();
                 } catch (Throwable loopResultT) {
                     if (loopResultT instanceof MongoException) {
+                        /* if we get here, some of the batches failed on the server side,
+                         * so we need to mark the last attempt to avoid retrying. */
                         retryState.markAsLastAttempt();
                     }
                     callback.onResult(null, loopResultT);
@@ -355,7 +367,9 @@ private void executeBulkWriteBatchAsync(final RetryState retryState, final Async
         });
     }
 
-    private void handleMongoWriteConcernWithResponseException(final RetryState retryState, final boolean breakAndThrowIfDifferent) {
+    private void handleMongoWriteConcernWithResponseException(final RetryState retryState,
+                                                              final boolean breakAndThrowIfDifferent,
+                                                              final TimeoutContext timeoutContext) {
         if (!retryState.isFirstAttempt()) {
             RuntimeException prospectiveFailedResult = (RuntimeException) retryState.exception().orElse(null);
             boolean prospectiveResultIsWriteConcernException = prospectiveFailedResult instanceof MongoWriteConcernWithResponseException;
@@ -365,14 +379,15 @@ private void handleMongoWriteConcernWithResponseException(final RetryState retry
                         .batch().ifPresent(bulkWriteBatch -> {
                             bulkWriteBatch.addResult(
                                     (BsonDocument) ((MongoWriteConcernWithResponseException) prospectiveFailedResult).getResponse());
-                            BulkWriteTracker.attachNext(retryState, bulkWriteBatch);
+                            BulkWriteTracker.attachNext(retryState, bulkWriteBatch, timeoutContext);
                 });
             }
         }
     }
 
     private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetryState retryState,
-            @Nullable final SingleResultCallback<BulkWriteResult> callback) {
+                                                                      @Nullable final SingleResultCallback<BulkWriteResult> callback,
+                                                                      final TimeoutContext timeoutContext) {
         if (!retryState.isFirstAttempt()) {
             RuntimeException prospectiveFailedResult = (RuntimeException) retryState.exception().orElse(null);
             boolean prospectiveResultIsWriteConcernException = prospectiveFailedResult instanceof MongoWriteConcernWithResponseException;
@@ -384,7 +399,7 @@ private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetrySta
                         .batch().ifPresent(bulkWriteBatch -> {
                             bulkWriteBatch.addResult(
                                     (BsonDocument) ((MongoWriteConcernWithResponseException) prospectiveFailedResult).getResponse());
-                            BulkWriteTracker.attachNext(retryState, bulkWriteBatch);
+                            BulkWriteTracker.attachNext(retryState, bulkWriteBatch, timeoutContext);
                 });
             }
         }
@@ -392,16 +407,17 @@ private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetrySta
     }
 
     @Nullable
-    private BsonDocument executeCommand(final Connection connection, final BulkWriteBatch batch, final WriteBinding binding) {
+    private BsonDocument executeCommand(final OperationContext operationContext, final Connection connection, final BulkWriteBatch batch) {
         return connection.command(namespace.getDatabaseName(), batch.getCommand(), NO_OP_FIELD_NAME_VALIDATOR, null, batch.getDecoder(),
-                binding, shouldAcknowledge(batch, binding.getSessionContext()), batch.getPayload(), batch.getFieldNameValidator());
+                operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()),
+                batch.getPayload(), batch.getFieldNameValidator());
     }
 
-    private void executeCommandAsync(final AsyncWriteBinding binding, final AsyncConnection connection, final BulkWriteBatch batch,
+    private void executeCommandAsync(final OperationContext operationContext, final AsyncConnection connection, final BulkWriteBatch batch,
             final SingleResultCallback<BsonDocument> callback) {
         connection.commandAsync(namespace.getDatabaseName(), batch.getCommand(), NO_OP_FIELD_NAME_VALIDATOR, null, batch.getDecoder(),
-                binding, shouldAcknowledge(batch, binding.getSessionContext()), batch.getPayload(), batch.getFieldNameValidator(),
-                callback);
+                operationContext, shouldAcknowledge(batch, operationContext.getSessionContext()),
+                batch.getPayload(), batch.getFieldNameValidator(), callback);
     }
 
     private WriteConcern getAppliedWriteConcern(final SessionContext sessionContext) {
@@ -427,20 +443,21 @@ private void addErrorLabelsToWriteConcern(final BsonDocument result, final Set<S
     public static final class BulkWriteTracker {
         private int attempt;
         private final int attempts;
+        private final boolean retryUntilTimeoutThrowsException;
         @Nullable
         private final BulkWriteBatch batch;
 
-        static void attachNew(final RetryState retryState, final boolean retry) {
-            retryState.attach(AttachmentKeys.bulkWriteTracker(), new BulkWriteTracker(retry, null), false);
+        static void attachNew(final RetryState retryState, final boolean retry, final TimeoutContext timeoutContext) {
+            retryState.attach(AttachmentKeys.bulkWriteTracker(), new BulkWriteTracker(retry, null, timeoutContext), false);
         }
 
-        static void attachNew(final RetryState retryState, final BulkWriteBatch batch) {
-            attach(retryState, new BulkWriteTracker(batch.getRetryWrites(), batch));
+        static void attachNew(final RetryState retryState, final BulkWriteBatch batch, final TimeoutContext timeoutContext) {
+            attach(retryState, new BulkWriteTracker(batch.getRetryWrites(), batch, timeoutContext));
         }
 
-        static BulkWriteTracker attachNext(final RetryState retryState, final BulkWriteBatch batch) {
+        static BulkWriteTracker attachNext(final RetryState retryState, final BulkWriteBatch batch, final TimeoutContext timeoutContext) {
             BulkWriteBatch nextBatch = batch.getNextBatch();
-            BulkWriteTracker nextTracker = new BulkWriteTracker(nextBatch.getRetryWrites(), nextBatch);
+            BulkWriteTracker nextTracker = new BulkWriteTracker(nextBatch.getRetryWrites(), nextBatch, timeoutContext);
             attach(retryState, nextTracker);
             return nextTracker;
         }
@@ -454,13 +471,17 @@ private static void attach(final RetryState retryState, final BulkWriteTracker t
             }
         }
 
-        private BulkWriteTracker(final boolean retry, @Nullable final BulkWriteBatch batch) {
+        private BulkWriteTracker(final boolean retry, @Nullable final BulkWriteBatch batch, final TimeoutContext timeoutContext) {
             attempt = 0;
             attempts = retry ? RetryState.RETRIES + 1 : 1;
             this.batch = batch;
+            this.retryUntilTimeoutThrowsException = timeoutContext.hasTimeoutMS();
         }
 
         boolean lastAttempt() {
+            if (retryUntilTimeoutThrowsException){
+                return false;
+            }
             return attempt == attempts - 1;
         }
 
diff --git a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java
index bfa1adbd97e..ac69f8742c7 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.MongoClientException;
 import com.mongodb.WriteConcern;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ServerDescription;
@@ -28,6 +29,7 @@
 import com.mongodb.internal.bulk.DeleteRequest;
 import com.mongodb.internal.bulk.UpdateRequest;
 import com.mongodb.internal.bulk.WriteRequest;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
 import com.mongodb.internal.session.SessionContext;
@@ -186,14 +188,20 @@ static boolean canRetryWrite(final ConnectionDescription connectionDescription,
         return true;
     }
 
-    static boolean canRetryRead(final ServerDescription serverDescription, final SessionContext sessionContext) {
-        if (sessionContext.hasActiveTransaction()) {
+    static boolean canRetryRead(final ServerDescription serverDescription, final OperationContext operationContext) {
+        if (operationContext.getSessionContext().hasActiveTransaction()) {
             LOGGER.debug("retryReads set to true but in an active transaction.");
             return false;
         }
         return true;
     }
 
+    static void setNonTailableCursorMaxTimeSupplier(final TimeoutMode timeoutMode, final OperationContext operationContext) {
+        if (timeoutMode == TimeoutMode.ITERATION) {
+            operationContext.getTimeoutContext().setMaxTimeOverride(0L);
+        }
+    }
+
     /**
      * This internal exception is used to
      * <ul>
diff --git a/driver-core/src/main/com/mongodb/internal/operation/Operations.java b/driver-core/src/main/com/mongodb/internal/operation/Operations.java
index 89a61558e59..e271f23d522 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/Operations.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/Operations.java
@@ -21,6 +21,7 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.BulkWriteOptions;
 import com.mongodb.client.model.ClusteredIndexOptions;
 import com.mongodb.client.model.Collation;
@@ -86,7 +87,6 @@
 import static com.mongodb.assertions.Assertions.notNull;
 import static java.lang.String.format;
 import static java.util.Collections.singletonList;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 final class Operations<TDocument> {
     private final MongoNamespace namespace;
@@ -145,12 +145,12 @@ boolean isRetryReads() {
     }
 
     CountDocumentsOperation countDocuments(final Bson filter, final CountOptions options) {
-        CountDocumentsOperation operation = new CountDocumentsOperation(assertNotNull(namespace))
+        CountDocumentsOperation operation = new CountDocumentsOperation(
+                assertNotNull(namespace))
                 .retryReads(retryReads)
                 .filter(toBsonDocument(filter))
                 .skip(options.getSkip())
                 .limit(options.getLimit())
-                .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                 .collation(options.getCollation())
                 .comment(options.getComment());
         if (options.getHint() != null) {
@@ -162,9 +162,9 @@ CountDocumentsOperation countDocuments(final Bson filter, final CountOptions opt
     }
 
     EstimatedDocumentCountOperation estimatedDocumentCount(final EstimatedDocumentCountOptions options) {
-        return new EstimatedDocumentCountOperation(assertNotNull(namespace))
+        return new EstimatedDocumentCountOperation(
+                assertNotNull(namespace))
                 .retryReads(retryReads)
-                .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                 .comment(options.getComment());
     }
 
@@ -185,14 +185,13 @@ <TResult> FindOperation<TResult> find(final MongoNamespace findNamespace, @Nulla
 
     private <TResult> FindOperation<TResult> createFindOperation(final MongoNamespace findNamespace, @Nullable final Bson filter,
                                                                  final Class<TResult> resultClass, final FindOptions options) {
-        FindOperation<TResult> operation = new FindOperation<>(findNamespace, codecRegistry.get(resultClass))
+        FindOperation<TResult> operation = new FindOperation<>(
+                findNamespace, codecRegistry.get(resultClass))
                 .retryReads(retryReads)
                 .filter(filter == null ? new BsonDocument() : filter.toBsonDocument(documentClass, codecRegistry))
                 .batchSize(options.getBatchSize())
                 .skip(options.getSkip())
                 .limit(options.getLimit())
-                .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
-                .maxAwaitTime(options.getMaxAwaitTime(MILLISECONDS), MILLISECONDS)
                 .projection(toBsonDocument(options.getProjection()))
                 .sort(toBsonDocument(options.getSort()))
                 .cursorType(options.getCursorType())
@@ -205,7 +204,8 @@ private <TResult> FindOperation<TResult> createFindOperation(final MongoNamespac
                 .max(toBsonDocument(options.getMax()))
                 .returnKey(options.isReturnKey())
                 .showRecordId(options.isShowRecordId())
-                .allowDiskUse(options.isAllowDiskUse());
+                .allowDiskUse(options.isAllowDiskUse())
+                .timeoutMode(options.getTimeoutMode());
 
         if (options.getHint() != null) {
             operation.hint(toBsonDocument(options.getHint()));
@@ -215,65 +215,59 @@ private <TResult> FindOperation<TResult> createFindOperation(final MongoNamespac
         return operation;
     }
 
-    <TResult> DistinctOperation<TResult> distinct(final String fieldName, @Nullable final Bson filter,
-                                                         final Class<TResult> resultClass, final long maxTimeMS,
-                                                         final Collation collation, final BsonValue comment) {
-        return new DistinctOperation<>(assertNotNull(namespace), fieldName, codecRegistry.get(resultClass))
+    <TResult> DistinctOperation<TResult> distinct(final String fieldName, @Nullable final Bson filter, final Class<TResult> resultClass,
+            final Collation collation, final BsonValue comment) {
+        return new DistinctOperation<>(assertNotNull(namespace),
+                fieldName, codecRegistry.get(resultClass))
                 .retryReads(retryReads)
                 .filter(filter == null ? null : filter.toBsonDocument(documentClass, codecRegistry))
-                .maxTime(maxTimeMS, MILLISECONDS)
                 .collation(collation)
                 .comment(comment);
-
     }
 
     <TResult> AggregateOperation<TResult> aggregate(final List<? extends Bson> pipeline, final Class<TResult> resultClass,
-                                                    final long maxTimeMS, final long maxAwaitTimeMS, @Nullable final Integer batchSize,
-                                                    final Collation collation, @Nullable final Bson hint, @Nullable final String hintString,
-                                                    final BsonValue comment,
-                                                    final Bson variables, final Boolean allowDiskUse,
-                                                    final AggregationLevel aggregationLevel) {
-        return new AggregateOperation<>(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)),
-                codecRegistry.get(resultClass), aggregationLevel)
+            @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize,
+            final Collation collation, @Nullable final Bson hint, @Nullable final String hintString,
+            final BsonValue comment, final Bson variables, final Boolean allowDiskUse, final AggregationLevel aggregationLevel) {
+        return new AggregateOperation<>(assertNotNull(namespace),
+                assertNotNull(toBsonDocumentList(pipeline)), codecRegistry.get(resultClass), aggregationLevel)
                 .retryReads(retryReads)
-                .maxTime(maxTimeMS, MILLISECONDS)
-                .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS)
                 .allowDiskUse(allowDiskUse)
                 .batchSize(batchSize)
                 .collation(collation)
                 .hint(hint != null ? toBsonDocument(hint) : (hintString != null ? new BsonString(hintString) : null))
                 .comment(comment)
-                .let(toBsonDocument(variables));
+                .let(toBsonDocument(variables))
+                .timeoutMode(timeoutMode);
     }
 
-    AggregateToCollectionOperation aggregateToCollection(final List<? extends Bson> pipeline, final long maxTimeMS,
-            final Boolean allowDiskUse, final Boolean bypassDocumentValidation,
-            final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment,
-            final Bson variables, final AggregationLevel aggregationLevel) {
-        return new AggregateToCollectionOperation(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)),
-                readConcern, writeConcern, aggregationLevel)
-                .maxTime(maxTimeMS, MILLISECONDS)
+    AggregateToCollectionOperation aggregateToCollection(final List<? extends Bson> pipeline, @Nullable final TimeoutMode timeoutMode,
+            final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, @Nullable final Bson hint,
+            @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) {
+        return new AggregateToCollectionOperation(assertNotNull(namespace),
+                assertNotNull(toBsonDocumentList(pipeline)), readConcern, writeConcern, aggregationLevel)
                 .allowDiskUse(allowDiskUse)
                 .bypassDocumentValidation(bypassDocumentValidation)
                 .collation(collation)
                 .hint(hint != null ? toBsonDocument(hint) : (hintString != null ? new BsonString(hintString) : null))
                 .comment(comment)
-                .let(toBsonDocument(variables));
+                .let(toBsonDocument(variables))
+                .timeoutMode(timeoutMode);
     }
 
     @SuppressWarnings("deprecation")
     MapReduceToCollectionOperation mapReduceToCollection(final String databaseName, final String collectionName,
                                                                 final String mapFunction, final String reduceFunction,
                                                                 @Nullable final String finalizeFunction, final Bson filter,
-                                                                final int limit, final long maxTimeMS, final boolean jsMode,
+                                                                final int limit, final boolean jsMode,
                                                                 final Bson scope, final Bson sort, final boolean verbose,
                                                                 final com.mongodb.client.model.MapReduceAction action,
                                                                 final Boolean bypassDocumentValidation, final Collation collation) {
-        MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation(assertNotNull(namespace),
-                new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), collectionName, writeConcern)
+        MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation(
+                assertNotNull(namespace), new BsonJavaScript(mapFunction),
+                new BsonJavaScript(reduceFunction), collectionName, writeConcern)
                 .filter(toBsonDocument(filter))
                 .limit(limit)
-                .maxTime(maxTimeMS, MILLISECONDS)
                 .jsMode(jsMode)
                 .scope(toBsonDocument(scope))
                 .sort(toBsonDocument(sort))
@@ -290,20 +284,15 @@ MapReduceToCollectionOperation mapReduceToCollection(final String databaseName,
     }
 
     <TResult> MapReduceWithInlineResultsOperation<TResult> mapReduce(final String mapFunction, final String reduceFunction,
-                                                                            @Nullable final String finalizeFunction,
-                                                                            final Class<TResult> resultClass,
-                                                                            final Bson filter, final int limit,
-                                                                            final long maxTimeMS, final boolean jsMode, final Bson scope,
-                                                                            final Bson sort, final boolean verbose,
-                                                                            final Collation collation) {
+            @Nullable final String finalizeFunction, final Class<TResult> resultClass, final Bson filter, final int limit,
+            final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose,
+            final Collation collation) {
         MapReduceWithInlineResultsOperation<TResult> operation =
-                new MapReduceWithInlineResultsOperation<>(assertNotNull(namespace),
-                        new BsonJavaScript(mapFunction),
-                        new BsonJavaScript(reduceFunction),
+                new MapReduceWithInlineResultsOperation<>(
+                        assertNotNull(namespace), new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction),
                         codecRegistry.get(resultClass))
                         .filter(toBsonDocument(filter))
                         .limit(limit)
-                        .maxTime(maxTimeMS, MILLISECONDS)
                         .jsMode(jsMode)
                         .scope(toBsonDocument(scope))
                         .sort(toBsonDocument(sort))
@@ -316,11 +305,11 @@ <TResult> MapReduceWithInlineResultsOperation<TResult> mapReduce(final String ma
     }
 
     FindAndDeleteOperation<TDocument> findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) {
-        return new FindAndDeleteOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec())
+        return new FindAndDeleteOperation<>(
+                assertNotNull(namespace), writeConcern, retryWrites, getCodec())
                 .filter(toBsonDocument(filter))
                 .projection(toBsonDocument(options.getProjection()))
                 .sort(toBsonDocument(options.getSort()))
-                .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                 .collation(options.getCollation())
                 .hint(toBsonDocument(options.getHint()))
                 .hintString(options.getHintString())
@@ -330,14 +319,13 @@ FindAndDeleteOperation<TDocument> findOneAndDelete(final Bson filter, final Find
 
     FindAndReplaceOperation<TDocument> findOneAndReplace(final Bson filter, final TDocument replacement,
                                                                 final FindOneAndReplaceOptions options) {
-        return new FindAndReplaceOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(),
-                documentToBsonDocument(replacement))
+        return new FindAndReplaceOperation<>(
+                assertNotNull(namespace), writeConcern, retryWrites, getCodec(), documentToBsonDocument(replacement))
                 .filter(toBsonDocument(filter))
                 .projection(toBsonDocument(options.getProjection()))
                 .sort(toBsonDocument(options.getSort()))
                 .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE)
                 .upsert(options.isUpsert())
-                .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                 .bypassDocumentValidation(options.getBypassDocumentValidation())
                 .collation(options.getCollation())
                 .hint(toBsonDocument(options.getHint()))
@@ -347,14 +335,13 @@ FindAndReplaceOperation<TDocument> findOneAndReplace(final Bson filter, final TD
     }
 
     FindAndUpdateOperation<TDocument> findOneAndUpdate(final Bson filter, final Bson update, final FindOneAndUpdateOptions options) {
-        return new FindAndUpdateOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(),
-                assertNotNull(toBsonDocument(update)))
+        return new FindAndUpdateOperation<>(
+                assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocument(update)))
                 .filter(toBsonDocument(filter))
                 .projection(toBsonDocument(options.getProjection()))
                 .sort(toBsonDocument(options.getSort()))
                 .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE)
                 .upsert(options.isUpsert())
-                .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                 .bypassDocumentValidation(options.getBypassDocumentValidation())
                 .collation(options.getCollation())
                 .arrayFilters(toBsonDocumentList(options.getArrayFilters()))
@@ -366,14 +353,13 @@ FindAndUpdateOperation<TDocument> findOneAndUpdate(final Bson filter, final Bson
 
     FindAndUpdateOperation<TDocument> findOneAndUpdate(final Bson filter, final List<? extends Bson> update,
                                                        final FindOneAndUpdateOptions options) {
-        return new FindAndUpdateOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(),
-                assertNotNull(toBsonDocumentList(update)))
+        return new FindAndUpdateOperation<>(
+                assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocumentList(update)))
                 .filter(toBsonDocument(filter))
                 .projection(toBsonDocument(options.getProjection()))
                 .sort(toBsonDocument(options.getSort()))
                 .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE)
                 .upsert(options.isUpsert())
-                .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                 .bypassDocumentValidation(options.getBypassDocumentValidation())
                 .collation(options.getCollation())
                 .arrayFilters(toBsonDocumentList(options.getArrayFilters()))
@@ -430,8 +416,7 @@ MixedBulkWriteOperation updateMany(final Bson filter, final List<? extends Bson>
                         .comment(options.getComment()).let(options.getLet()));
     }
 
-    MixedBulkWriteOperation insertMany(final List<? extends TDocument> documents,
-                                              final InsertManyOptions options) {
+    MixedBulkWriteOperation insertMany(final List<? extends TDocument> documents, final InsertManyOptions options) {
         notNull("documents", documents);
         List<InsertRequest> requests = new ArrayList<>(documents.size());
         for (TDocument document : documents) {
@@ -444,13 +429,14 @@ MixedBulkWriteOperation insertMany(final List<? extends TDocument> documents,
             requests.add(new InsertRequest(documentToBsonDocument(document)));
         }
 
-        return new MixedBulkWriteOperation(assertNotNull(namespace), requests, options.isOrdered(), writeConcern, retryWrites)
-                .bypassDocumentValidation(options.getBypassDocumentValidation()).comment(options.getComment());
+        return new MixedBulkWriteOperation(assertNotNull(namespace),
+                requests, options.isOrdered(), writeConcern, retryWrites)
+                .bypassDocumentValidation(options.getBypassDocumentValidation())
+                .comment(options.getComment());
     }
 
     @SuppressWarnings("unchecked")
-    MixedBulkWriteOperation bulkWrite(final List<? extends WriteModel<? extends TDocument>> requests,
-                                             final BulkWriteOptions options) {
+    MixedBulkWriteOperation bulkWrite(final List<? extends WriteModel<? extends TDocument>> requests, final BulkWriteOptions options) {
         notNull("requests", requests);
         List<WriteRequest> writeRequests = new ArrayList<>(requests.size());
         for (WriteModel<? extends TDocument> writeModel : requests) {
@@ -465,9 +451,8 @@ MixedBulkWriteOperation bulkWrite(final List<? extends WriteModel<? extends TDoc
                 writeRequest = new InsertRequest(documentToBsonDocument(document));
             } else if (writeModel instanceof ReplaceOneModel) {
                 ReplaceOneModel<TDocument> replaceOneModel = (ReplaceOneModel<TDocument>) writeModel;
-                writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(replaceOneModel.getFilter())), documentToBsonDocument(replaceOneModel
-                        .getReplacement()),
-                        WriteRequest.Type.REPLACE)
+                writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(replaceOneModel.getFilter())),
+                        documentToBsonDocument(replaceOneModel.getReplacement()), WriteRequest.Type.REPLACE)
                         .upsert(replaceOneModel.getReplaceOptions().isUpsert())
                         .collation(replaceOneModel.getReplaceOptions().getCollation())
                         .hint(toBsonDocument(replaceOneModel.getReplaceOptions().getHint()))
@@ -512,7 +497,8 @@ MixedBulkWriteOperation bulkWrite(final List<? extends WriteModel<? extends TDoc
             writeRequests.add(writeRequest);
         }
 
-        return new MixedBulkWriteOperation(assertNotNull(namespace), writeRequests, options.isOrdered(), writeConcern, retryWrites)
+        return new MixedBulkWriteOperation(assertNotNull(namespace), writeRequests,
+                options.isOrdered(), writeConcern, retryWrites)
                 .bypassDocumentValidation(options.getBypassDocumentValidation())
                 .comment(options.getComment())
                 .let(toBsonDocument(options.getLet()));
@@ -521,20 +507,20 @@ MixedBulkWriteOperation bulkWrite(final List<? extends WriteModel<? extends TDoc
     <TResult> CommandReadOperation<TResult> commandRead(final Bson command, final Class<TResult> resultClass) {
         notNull("command", command);
         notNull("resultClass", resultClass);
-        return new CommandReadOperation<>(assertNotNull(namespace).getDatabaseName(), assertNotNull(toBsonDocument(command)),
-                codecRegistry.get(resultClass));
+        return new CommandReadOperation<>(assertNotNull(namespace).getDatabaseName(),
+                                          assertNotNull(toBsonDocument(command)), codecRegistry.get(resultClass));
     }
 
 
     DropDatabaseOperation dropDatabase() {
-        return new DropDatabaseOperation(assertNotNull(namespace).getDatabaseName(), getWriteConcern());
+        return new DropDatabaseOperation(assertNotNull(namespace).getDatabaseName(),
+                getWriteConcern());
     }
 
-
     CreateCollectionOperation createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions,
             @Nullable final AutoEncryptionSettings autoEncryptionSettings) {
-        CreateCollectionOperation operation = new CreateCollectionOperation(assertNotNull(namespace).getDatabaseName(),
-                collectionName, writeConcern)
+        CreateCollectionOperation operation = new CreateCollectionOperation(
+                assertNotNull(namespace).getDatabaseName(), collectionName, writeConcern)
                 .collation(createCollectionOptions.getCollation())
                 .capped(createCollectionOptions.isCapped())
                 .sizeInBytes(createCollectionOptions.getSizeInBytes())
@@ -576,7 +562,8 @@ CreateCollectionOperation createCollection(final String collectionName, final Cr
     DropCollectionOperation dropCollection(
             final DropCollectionOptions dropCollectionOptions,
             @Nullable final AutoEncryptionSettings autoEncryptionSettings) {
-        DropCollectionOperation operation = new DropCollectionOperation(assertNotNull(namespace), writeConcern);
+        DropCollectionOperation operation = new DropCollectionOperation(
+                assertNotNull(namespace), writeConcern);
         Bson encryptedFields = dropCollectionOptions.getEncryptedFields();
         if (encryptedFields != null) {
             operation.encryptedFields(assertNotNull(toBsonDocument(encryptedFields)));
@@ -592,17 +579,17 @@ DropCollectionOperation dropCollection(
 
 
     RenameCollectionOperation renameCollection(final MongoNamespace newCollectionNamespace,
-                                                      final RenameCollectionOptions renameCollectionOptions) {
-        return new RenameCollectionOperation(assertNotNull(namespace), newCollectionNamespace, writeConcern)
-                .dropTarget(renameCollectionOptions.isDropTarget());
+            final RenameCollectionOptions renameCollectionOptions) {
+        return new RenameCollectionOperation(assertNotNull(namespace),
+                newCollectionNamespace, writeConcern).dropTarget(renameCollectionOptions.isDropTarget());
     }
 
     CreateViewOperation createView(final String viewName, final String viewOn, final List<? extends Bson> pipeline,
             final CreateViewOptions createViewOptions) {
         notNull("options", createViewOptions);
         notNull("pipeline", pipeline);
-        return new CreateViewOperation(assertNotNull(namespace).getDatabaseName(), viewName, viewOn,
-                assertNotNull(toBsonDocumentList(pipeline)), writeConcern).collation(createViewOptions.getCollation());
+        return new CreateViewOperation(assertNotNull(namespace).getDatabaseName(), viewName,
+                viewOn, assertNotNull(toBsonDocumentList(pipeline)), writeConcern).collation(createViewOptions.getCollation());
     }
 
     CreateIndexesOperation createIndexes(final List<IndexModel> indexes, final CreateIndexOptions createIndexOptions) {
@@ -635,8 +622,8 @@ CreateIndexesOperation createIndexes(final List<IndexModel> indexes, final Creat
                     .hidden(model.getOptions().isHidden())
             );
         }
-        return new CreateIndexesOperation(assertNotNull(namespace), indexRequests, writeConcern)
-                .maxTime(createIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS)
+        return new CreateIndexesOperation(
+                assertNotNull(namespace), indexRequests, writeConcern)
                 .commitQuorum(createIndexOptions.getCommitQuorum());
     }
 
@@ -644,14 +631,12 @@ CreateSearchIndexesOperation createSearchIndexes(final List<SearchIndexModel> in
         List<SearchIndexRequest> indexRequests = indexes.stream()
                 .map(this::createSearchIndexRequest)
                 .collect(Collectors.toList());
-
         return new CreateSearchIndexesOperation(assertNotNull(namespace), indexRequests);
     }
 
     UpdateSearchIndexesOperation updateSearchIndex(final String indexName, final Bson definition) {
         BsonDocument definitionDocument = assertNotNull(toBsonDocument(definition));
         SearchIndexRequest searchIndexRequest = new SearchIndexRequest(definitionDocument, indexName);
-
         return new UpdateSearchIndexesOperation(assertNotNull(namespace), searchIndexRequest);
     }
 
@@ -662,47 +647,39 @@ DropSearchIndexOperation dropSearchIndex(final String indexName) {
 
 
     <TResult> ListSearchIndexesOperation<TResult> listSearchIndexes(final Class<TResult> resultClass,
-                                                                                      final long maxTimeMS,
-                                                                                      @Nullable final String indexName,
-                                                                                      @Nullable final Integer batchSize,
-                                                                                      @Nullable final Collation collation,
-                                                                                      @Nullable final BsonValue comment,
-                                                                                      @Nullable final Boolean allowDiskUse) {
-
-
-        return new ListSearchIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass), maxTimeMS,
-                indexName, batchSize, collation, comment, allowDiskUse, retryReads);
+            @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation,
+            @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) {
+        return new ListSearchIndexesOperation<>(assertNotNull(namespace),
+                codecRegistry.get(resultClass), indexName, batchSize, collation, comment, allowDiskUse, retryReads);
     }
 
-    DropIndexOperation dropIndex(final String indexName, final DropIndexOptions dropIndexOptions) {
-        return new DropIndexOperation(assertNotNull(namespace), indexName, writeConcern)
-                .maxTime(dropIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS);
+    DropIndexOperation dropIndex(final String indexName, final DropIndexOptions ignoredOptions) {
+        return new DropIndexOperation(assertNotNull(namespace), indexName, writeConcern);
     }
 
-    DropIndexOperation dropIndex(final Bson keys, final DropIndexOptions dropIndexOptions) {
-        return new DropIndexOperation(assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern)
-                .maxTime(dropIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS);
+    DropIndexOperation dropIndex(final Bson keys, final DropIndexOptions ignoredOptions) {
+        return new DropIndexOperation(assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern);
     }
 
     <TResult> ListCollectionsOperation<TResult> listCollections(final String databaseName, final Class<TResult> resultClass,
                                                                 final Bson filter, final boolean collectionNamesOnly,
                                                                 final boolean authorizedCollections,
-                                                                @Nullable final Integer batchSize, final long maxTimeMS,
-                                                                final BsonValue comment) {
+                                                                @Nullable final Integer batchSize,
+                                                                final BsonValue comment, @Nullable final TimeoutMode timeoutMode) {
         return new ListCollectionsOperation<>(databaseName, codecRegistry.get(resultClass))
                 .retryReads(retryReads)
                 .filter(toBsonDocument(filter))
                 .nameOnly(collectionNamesOnly)
                 .authorizedCollections(authorizedCollections)
                 .batchSize(batchSize == null ? 0 : batchSize)
-                .maxTime(maxTimeMS, MILLISECONDS)
-                .comment(comment);
+                .comment(comment)
+                .timeoutMode(timeoutMode);
     }
 
     <TResult> ListDatabasesOperation<TResult> listDatabases(final Class<TResult> resultClass, final Bson filter,
-                                                            final Boolean nameOnly, final long maxTimeMS,
+                                                            final Boolean nameOnly,
                                                             final Boolean authorizedDatabasesOnly, final BsonValue comment) {
-        return new ListDatabasesOperation<>(codecRegistry.get(resultClass)).maxTime(maxTimeMS, MILLISECONDS)
+        return new ListDatabasesOperation<>(codecRegistry.get(resultClass))
                 .retryReads(retryReads)
                 .filter(toBsonDocument(filter))
                 .nameOnly(nameOnly)
@@ -711,25 +688,28 @@ <TResult> ListDatabasesOperation<TResult> listDatabases(final Class<TResult> res
     }
 
     <TResult> ListIndexesOperation<TResult> listIndexes(final Class<TResult> resultClass, @Nullable final Integer batchSize,
-                                                               final long maxTimeMS, final BsonValue comment) {
-        return new ListIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass))
+            final BsonValue comment, @Nullable final TimeoutMode timeoutMode) {
+        return new ListIndexesOperation<>(assertNotNull(namespace),
+                codecRegistry.get(resultClass))
                 .retryReads(retryReads)
                 .batchSize(batchSize == null ? 0 : batchSize)
-                .maxTime(maxTimeMS, MILLISECONDS)
-                .comment(comment);
+                .comment(comment)
+                .timeoutMode(timeoutMode);
     }
 
     <TResult> ChangeStreamOperation<TResult> changeStream(final FullDocument fullDocument,
             final FullDocumentBeforeChange fullDocumentBeforeChange, final List<? extends Bson> pipeline,
             final Decoder<TResult> decoder, final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize,
-            final Collation collation, final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken,
+            final Collation collation, final BsonValue comment, final BsonDocument resumeToken,
             final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) {
-        return new ChangeStreamOperation<>(assertNotNull(namespace), fullDocument, fullDocumentBeforeChange,
+        return new ChangeStreamOperation<>(
+                assertNotNull(namespace),
+                fullDocument,
+                fullDocumentBeforeChange,
                 assertNotNull(toBsonDocumentList(pipeline)), decoder, changeStreamLevel)
                 .batchSize(batchSize)
                 .collation(collation)
                 .comment(comment)
-                .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS)
                 .resumeAfter(resumeToken)
                 .startAtOperationTime(startAtOperationTime)
                 .startAfter(startAfter)
@@ -773,7 +753,6 @@ private SearchIndexRequest createSearchIndexRequest(final SearchIndexModel model
         BsonDocument definition = assertNotNull(toBsonDocument(model.getDefinition()));
         String indexName = model.getName();
 
-        SearchIndexRequest indexRequest = new SearchIndexRequest(definition, indexName);
-        return indexRequest;
+        return new SearchIndexRequest(definition, indexName);
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java
index 14d61105d11..aa5d2e7d451 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java
@@ -24,6 +24,7 @@
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public interface ReadOperation<T> {
+
     /**
      * General execute which can return anything of type T
      *
diff --git a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java
index d6f7ee897ae..fd727f2fd81 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java
@@ -53,12 +53,8 @@ public class RenameCollectionOperation implements AsyncWriteOperation<Void>, Wri
     private final WriteConcern writeConcern;
     private boolean dropTarget;
 
-    public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace) {
-        this(originalNamespace, newNamespace, null);
-    }
-
     public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace,
-                                     @Nullable final WriteConcern writeConcern) {
+            @Nullable final WriteConcern writeConcern) {
         this.originalNamespace = notNull("originalNamespace", originalNamespace);
         this.newNamespace = notNull("newNamespace", newNamespace);
         this.writeConcern = writeConcern;
@@ -79,7 +75,8 @@ public RenameCollectionOperation dropTarget(final boolean dropTarget) {
 
     @Override
     public Void execute(final WriteBinding binding) {
-        return withConnection(binding, connection -> executeCommand(binding, "admin", getCommand(), connection, writeConcernErrorTransformer()));
+        return withConnection(binding, connection -> executeCommand(binding, "admin", getCommand(), connection,
+                writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())));
     }
 
     @Override
@@ -90,7 +87,8 @@ public void executeAsync(final AsyncWriteBinding binding, final SingleResultCall
                 errHandlingCallback.onResult(null, t);
             } else {
                 executeCommandAsync(binding, "admin", getCommand(), assertNotNull(connection),
-                        writeConcernErrorTransformerAsync(), releasingCallback(errHandlingCallback, connection));
+                        writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()),
+                        releasingCallback(errHandlingCallback, connection));
             }
         });
     }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java
index 5610f84dd36..43334109c20 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java
@@ -18,6 +18,8 @@
 
 import com.mongodb.MongoException;
 import com.mongodb.ReadPreference;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.async.function.AsyncCallbackBiFunction;
@@ -32,6 +34,7 @@
 import com.mongodb.internal.connection.Connection;
 import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.operation.retry.AttachmentKeys;
+import com.mongodb.internal.session.SessionContext;
 import com.mongodb.internal.validator.NoOpFieldNameValidator;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
@@ -92,6 +95,8 @@ interface CommandWriteTransformer<T, R> {
         R apply(T t, Connection connection);
     }
 
+    private static final BsonDocumentCodec BSON_DOCUMENT_CODEC = new BsonDocumentCodec();
+
     static <T> T withReadConnectionSource(final ReadBinding binding, final CallableWithSource<T> callable) {
         ConnectionSource source = binding.getReadConnectionSource();
         try {
@@ -172,7 +177,8 @@ static <D, T> T executeRetryableRead(
             final Decoder<D> decoder,
             final CommandReadTransformer<D, T> transformer,
             final boolean retryReads) {
-        return executeRetryableRead(binding, binding::getReadConnectionSource, database, commandCreator, decoder, transformer, retryReads);
+        return executeRetryableRead(binding, binding::getReadConnectionSource, database, commandCreator,
+                                    decoder, transformer, retryReads);
     }
 
     static <D, T> T executeRetryableRead(
@@ -183,22 +189,38 @@ static <D, T> T executeRetryableRead(
             final Decoder<D> decoder,
             final CommandReadTransformer<D, T> transformer,
             final boolean retryReads) {
-        RetryState retryState = CommandOperationHelper.initialRetryState(retryReads);
+        RetryState retryState = CommandOperationHelper.initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext());
+
         Supplier<T> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () ->
                 withSourceAndConnection(readConnectionSourceSupplier, false, (source, connection) -> {
-                    retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext()));
-                    return createReadCommandAndExecute(retryState, binding, source, database, commandCreator, decoder, transformer, connection);
+                    retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext()));
+                    return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, database,
+                                                       commandCreator, decoder, transformer, connection);
                 })
         );
         return read.get();
     }
 
+    @VisibleForTesting(otherwise = PRIVATE)
+    static <T> T executeCommand(final WriteBinding binding, final String database, final CommandCreator commandCreator,
+            final CommandWriteTransformer<BsonDocument, T> transformer) {
+        return withSourceAndConnection(binding::getWriteConnectionSource, false, (source, connection) ->
+                transformer.apply(assertNotNull(
+                        connection.command(database,
+                                commandCreator.create(binding.getOperationContext(),
+                                        source.getServerDescription(),
+                                        connection.getDescription()),
+                                new NoOpFieldNameValidator(), primary(), BSON_DOCUMENT_CODEC, binding.getOperationContext())),
+                        connection));
+    }
+
     @VisibleForTesting(otherwise = PRIVATE)
     static <D, T> T executeCommand(final WriteBinding binding, final String database, final BsonDocument command,
                                    final Decoder<D> decoder, final CommandWriteTransformer<D, T> transformer) {
         return withSourceAndConnection(binding::getWriteConnectionSource, false, (source, connection) ->
                 transformer.apply(assertNotNull(
-                        connection.command(database, command, new NoOpFieldNameValidator(), primary(), decoder, binding)), connection));
+                        connection.command(database, command, new NoOpFieldNameValidator(), primary(), decoder,
+                                binding.getOperationContext())), connection));
     }
 
     @Nullable
@@ -206,7 +228,8 @@ static <T> T executeCommand(final WriteBinding binding, final String database, f
                                 final Connection connection, final CommandWriteTransformer<BsonDocument, T> transformer) {
         notNull("binding", binding);
         return transformer.apply(assertNotNull(
-                connection.command(database, command, new NoOpFieldNameValidator(), primary(), new BsonDocumentCodec(), binding)),
+                connection.command(database, command, new NoOpFieldNameValidator(), primary(), BSON_DOCUMENT_CODEC,
+                        binding.getOperationContext())),
                 connection);
     }
 
@@ -219,28 +242,30 @@ static <T, R> R executeRetryableWrite(
             final CommandCreator commandCreator,
             final CommandWriteTransformer<T, R> transformer,
             final com.mongodb.Function<BsonDocument, BsonDocument> retryCommandModifier) {
-        RetryState retryState = CommandOperationHelper.initialRetryState(true);
+        RetryState retryState = CommandOperationHelper.initialRetryState(true, binding.getOperationContext().getTimeoutContext());
         Supplier<R> retryingWrite = decorateWriteWithRetries(retryState, binding.getOperationContext(), () -> {
             boolean firstAttempt = retryState.isFirstAttempt();
-            if (!firstAttempt && binding.getSessionContext().hasActiveTransaction()) {
-                binding.getSessionContext().clearTransactionContext();
+            SessionContext sessionContext = binding.getOperationContext().getSessionContext();
+            if (!firstAttempt && sessionContext.hasActiveTransaction()) {
+                sessionContext.clearTransactionContext();
             }
             return withSourceAndConnection(binding::getWriteConnectionSource, true, (source, connection) -> {
                 int maxWireVersion = connection.getDescription().getMaxWireVersion();
                 try {
-                    retryState.breakAndThrowIfRetryAnd(() -> !canRetryWrite(connection.getDescription(), binding.getSessionContext()));
+                    retryState.breakAndThrowIfRetryAnd(() -> !canRetryWrite(connection.getDescription(), sessionContext));
                     BsonDocument command = retryState.attachment(AttachmentKeys.command())
                             .map(previousAttemptCommand -> {
                                 assertFalse(firstAttempt);
                                 return retryCommandModifier.apply(previousAttemptCommand);
-                            }).orElseGet(() -> commandCreator.create(source.getServerDescription(), connection.getDescription()));
+                            }).orElseGet(() -> commandCreator.create(binding.getOperationContext(), source.getServerDescription(),
+                                    connection.getDescription()));
                     // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry
                     retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true)
                             .attach(AttachmentKeys.retryableCommandFlag(), CommandOperationHelper.isRetryWritesEnabled(command), true)
                             .attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false)
                             .attach(AttachmentKeys.command(), command, false);
                     return transformer.apply(assertNotNull(connection.command(database, command, fieldNameValidator, readPreference,
-                                    commandResultDecoder, binding)),
+                                    commandResultDecoder, binding.getOperationContext())),
                             connection);
                 } catch (MongoException e) {
                     if (!firstAttempt) {
@@ -260,17 +285,18 @@ static <T, R> R executeRetryableWrite(
     @Nullable
     static <D, T> T createReadCommandAndExecute(
             final RetryState retryState,
-            final ReadBinding binding,
+            final OperationContext operationContext,
             final ConnectionSource source,
             final String database,
             final CommandCreator commandCreator,
             final Decoder<D> decoder,
             final CommandReadTransformer<D, T> transformer,
             final Connection connection) {
-        BsonDocument command = commandCreator.create(source.getServerDescription(), connection.getDescription());
+        BsonDocument command = commandCreator.create(operationContext, source.getServerDescription(),
+                connection.getDescription());
         retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false);
         return transformer.apply(assertNotNull(connection.command(database, command, new NoOpFieldNameValidator(),
-                source.getReadPreference(), decoder, binding)), source, connection);
+                source.getReadPreference(), decoder, operationContext)), source, connection);
     }
 
 
@@ -293,11 +319,11 @@ static <R> Supplier<R> decorateReadWithRetries(final RetryState retryState, fina
     }
 
 
-    static CommandWriteTransformer<BsonDocument, Void> writeConcernErrorTransformer() {
+    static CommandWriteTransformer<BsonDocument, Void> writeConcernErrorTransformer(final TimeoutContext timeoutContext) {
         return (result, connection) -> {
             assertNotNull(result);
             throwOnWriteConcernError(result, connection.getDescription().getServerAddress(),
-                    connection.getDescription().getMaxWireVersion());
+                    connection.getDescription().getMaxWireVersion(), timeoutContext);
             return null;
         };
     }
@@ -308,9 +334,10 @@ static <T> CommandReadTransformer<BsonDocument, BatchCursor<T>> singleBatchCurso
                         connection.getDescription().getServerAddress());
     }
 
-    static <T> BatchCursor<T> cursorDocumentToBatchCursor(final BsonDocument cursorDocument, final Decoder<T> decoder,
-            final BsonValue comment, final ConnectionSource source, final Connection connection, final int batchSize) {
-        return new CommandBatchCursor<>(cursorDocument, batchSize, 0, decoder, comment, source, connection);
+    static <T> BatchCursor<T> cursorDocumentToBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument,
+            final int batchSize, final Decoder<T> decoder, final BsonValue comment, final ConnectionSource source,
+            final Connection connection) {
+        return new CommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection);
     }
 
     private SyncOperationHelper() {
diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java
index d7134cd8ad0..bf73b5dabbf 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java
@@ -22,6 +22,7 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
 import com.mongodb.bulk.BulkWriteResult;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.BulkWriteOptions;
 import com.mongodb.client.model.Collation;
 import com.mongodb.client.model.CountOptions;
@@ -45,6 +46,7 @@
 import com.mongodb.client.model.WriteModel;
 import com.mongodb.client.model.changestream.FullDocument;
 import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.client.model.AggregationLevel;
 import com.mongodb.internal.client.model.FindOptions;
 import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
@@ -58,27 +60,84 @@
 
 import java.util.List;
 
+import static com.mongodb.assertions.Assertions.assertNotNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public final class SyncOperations<TDocument> {
     private final Operations<TDocument> operations;
+    private final TimeoutSettings timeoutSettings;
 
     public SyncOperations(final Class<TDocument> documentClass, final ReadPreference readPreference,
-                          final CodecRegistry codecRegistry, final boolean retryReads) {
-        this(null, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads);
+                          final CodecRegistry codecRegistry, final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        this(null, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads, timeoutSettings);
     }
 
     public SyncOperations(final MongoNamespace namespace, final Class<TDocument> documentClass, final ReadPreference readPreference,
-                          final CodecRegistry codecRegistry, final boolean retryReads) {
-        this(namespace, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads);
+                          final CodecRegistry codecRegistry, final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        this(namespace, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads, timeoutSettings);
     }
 
     public SyncOperations(@Nullable final MongoNamespace namespace, final Class<TDocument> documentClass, final ReadPreference readPreference,
                           final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern,
-                          final boolean retryWrites, final boolean retryReads) {
-        this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern,
+                          final boolean retryWrites, final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        WriteConcern writeConcernToUse = writeConcern;
+        if (timeoutSettings.getTimeoutMS() != null) {
+            writeConcernToUse = assertNotNull(WriteConcernHelper.cloneWithoutTimeout(writeConcern));
+        }
+        this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcernToUse,
                 retryWrites, retryReads);
+        this.timeoutSettings = timeoutSettings;
+    }
+
+    public TimeoutSettings createTimeoutSettings(final long maxTimeMS) {
+        return timeoutSettings.withMaxTimeMS(maxTimeMS);
+    }
+
+    public TimeoutSettings createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) {
+        return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS);
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final CountOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final EstimatedDocumentCountOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOptions options) {
+        return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOneAndDeleteOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOneAndReplaceOptions options) {
+        return createTimeoutSettings(options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final FindOneAndUpdateOptions options) {
+        return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS));
+    }
+
+    // TODO (CSOT) @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final CreateIndexOptions options) {
+        return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS));
+    }
+
+    // TODO (CSOT) @SuppressWarnings("deprecation") // MaxTime
+    public TimeoutSettings createTimeoutSettings(final DropIndexOptions options) {
+        return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS));
     }
 
     public ReadOperation<Long> countDocuments(final Bson filter, final CountOptions options) {
@@ -95,7 +154,7 @@ public <TResult> ReadOperation<BatchCursor<TResult>> findFirst(final Bson filter
     }
 
     public <TResult> ExplainableReadOperation<BatchCursor<TResult>> find(final Bson filter, final Class<TResult> resultClass,
-                                                              final FindOptions options) {
+            final FindOptions options) {
         return operations.find(filter, resultClass, options);
     }
 
@@ -105,30 +164,25 @@ public <TResult> ReadOperation<BatchCursor<TResult>> find(final MongoNamespace f
     }
 
     public <TResult> ReadOperation<BatchCursor<TResult>> distinct(final String fieldName, final Bson filter,
-                                                                  final Class<TResult> resultClass, final long maxTimeMS,
+                                                                  final Class<TResult> resultClass,
                                                                   final Collation collation, final BsonValue comment) {
-        return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment);
+        return operations.distinct(fieldName, filter, resultClass, collation, comment);
     }
 
     public <TResult> ExplainableReadOperation<BatchCursor<TResult>> aggregate(final List<? extends Bson> pipeline,
-                                                                              final Class<TResult> resultClass,
-                                                                              final long maxTimeMS, final long maxAwaitTimeMS,
-                                                                              @Nullable final Integer batchSize,
-                                                                              final Collation collation, final Bson hint,
-                                                                              final String hintString,
-                                                                              final BsonValue comment,
-                                                                              final Bson variables,
-                                                                              final Boolean allowDiskUse,
-                                                                              final AggregationLevel aggregationLevel) {
-        return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, batchSize, collation, hint, hintString, comment,
-                variables, allowDiskUse, aggregationLevel);
-    }
-
-    public ReadOperation<Void> aggregateToCollection(final List<? extends Bson> pipeline, final long maxTimeMS,
-            final Boolean allowDiskUse, final Boolean bypassDocumentValidation,
-            final Collation collation, final Bson hint, final String hintString, final BsonValue comment,
+            final Class<TResult> resultClass,
+            @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize,
+            final Collation collation, final Bson hint, final String hintString, final BsonValue comment, final Bson variables,
+            final Boolean allowDiskUse, final AggregationLevel aggregationLevel) {
+        return operations.aggregate(pipeline, resultClass, timeoutMode, batchSize, collation, hint, hintString,
+                comment, variables, allowDiskUse, aggregationLevel);
+    }
+
+    public AggregateToCollectionOperation aggregateToCollection(final List<? extends Bson> pipeline,
+            @Nullable final TimeoutMode timeoutMode, final Boolean allowDiskUse, final Boolean bypassDocumentValidation,
+            final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment,
             final Bson variables, final AggregationLevel aggregationLevel) {
-        return operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, hintString,
+        return operations.aggregateToCollection(pipeline, timeoutMode, allowDiskUse, bypassDocumentValidation, collation, hint, hintString,
                 comment, variables, aggregationLevel);
     }
 
@@ -136,21 +190,21 @@ public ReadOperation<Void> aggregateToCollection(final List<? extends Bson> pipe
     public WriteOperation<MapReduceStatistics> mapReduceToCollection(final String databaseName, final String collectionName,
                                                                      final String mapFunction, final String reduceFunction,
                                                                      final String finalizeFunction, final Bson filter, final int limit,
-                                                                     final long maxTimeMS, final boolean jsMode, final Bson scope,
+                                                                     final boolean jsMode, final Bson scope,
                                                                      final Bson sort, final boolean verbose,
                                                                      final com.mongodb.client.model.MapReduceAction action,
                                                                      final Boolean bypassDocumentValidation, final Collation collation) {
         return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, limit,
-                maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation);
+                jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation);
     }
 
     public <TResult> ReadOperation<MapReduceBatchCursor<TResult>> mapReduce(final String mapFunction, final String reduceFunction,
                                                                             final String finalizeFunction, final Class<TResult> resultClass,
                                                                             final Bson filter, final int limit,
-                                                                            final long maxTimeMS, final boolean jsMode, final Bson scope,
+                                                                            final boolean jsMode, final Bson scope,
                                                                             final Bson sort, final boolean verbose,
                                                                             final Collation collation) {
-        return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, maxTimeMS, jsMode, scope,
+        return operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, resultClass, filter, limit, jsMode, scope,
                 sort, verbose, collation);
     }
 
@@ -225,7 +279,6 @@ public WriteOperation<Void> dropDatabase() {
         return operations.dropDatabase();
     }
 
-
     public WriteOperation<Void> createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions,
             @Nullable final AutoEncryptionSettings autoEncryptionSettings) {
         return operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings);
@@ -263,14 +316,9 @@ public WriteOperation<Void> dropSearchIndex(final String indexName) {
 
 
     public <TResult> ExplainableReadOperation<BatchCursor<TResult>> listSearchIndexes(final Class<TResult> resultClass,
-                                                                           final long maxTimeMS,
-                                                                           @Nullable final String indexName,
-                                                                           @Nullable final Integer batchSize,
-                                                                           @Nullable final Collation collation,
-                                                                           @Nullable final BsonValue comment,
-                                                                           @Nullable final Boolean allowDiskUse) {
-        return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, batchSize, collation,
-               comment, allowDiskUse);
+            @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation,
+            @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) {
+        return operations.listSearchIndexes(resultClass, indexName, batchSize, collation, comment, allowDiskUse);
     }
 
     public WriteOperation<Void> dropIndex(final String indexName, final DropIndexOptions options) {
@@ -284,29 +332,30 @@ public WriteOperation<Void> dropIndex(final Bson keys, final DropIndexOptions op
     public <TResult> ReadOperation<BatchCursor<TResult>> listCollections(final String databaseName, final Class<TResult> resultClass,
                                                                          final Bson filter, final boolean collectionNamesOnly,
                                                                          final boolean authorizedCollections,
-                                                                         @Nullable final Integer batchSize, final long maxTimeMS,
-                                                                         final BsonValue comment) {
+                                                                         @Nullable final Integer batchSize,
+                                                                         final BsonValue comment, @Nullable final TimeoutMode timeoutMode) {
         return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections,
-                batchSize, maxTimeMS, comment);
+                batchSize, comment, timeoutMode);
+
     }
 
     public <TResult> ReadOperation<BatchCursor<TResult>> listDatabases(final Class<TResult> resultClass, final Bson filter,
-                                                                       final Boolean nameOnly, final long maxTimeMS,
+                                                                       final Boolean nameOnly,
                                                                        final Boolean authorizedDatabases, final BsonValue comment) {
-        return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabases, comment);
+        return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabases, comment);
     }
 
     public <TResult> ReadOperation<BatchCursor<TResult>> listIndexes(final Class<TResult> resultClass, @Nullable final Integer batchSize,
-                                                                     final long maxTimeMS, final BsonValue comment) {
-        return operations.listIndexes(resultClass, batchSize, maxTimeMS, comment);
+            final BsonValue comment, @Nullable final TimeoutMode timeoutMode) {
+        return operations.listIndexes(resultClass, batchSize, comment, timeoutMode);
     }
 
     public <TResult> ReadOperation<BatchCursor<TResult>> changeStream(final FullDocument fullDocument,
             final FullDocumentBeforeChange fullDocumentBeforeChange, final List<? extends Bson> pipeline, final Decoder<TResult> decoder,
             final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize, final Collation collation,
-            final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime,
+            final BsonValue comment, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime,
             final BsonDocument startAfter, final boolean showExpandedEvents) {
         return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, decoder, changeStreamLevel, batchSize,
-                collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
+                collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java
index 499623ebcce..3bb04efa8ed 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.Function;
 import com.mongodb.WriteConcern;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncWriteBinding;
 import com.mongodb.internal.binding.WriteBinding;
@@ -54,21 +55,25 @@ public WriteConcern getWriteConcern() {
 
     @Override
     public Void execute(final WriteBinding binding) {
-        isTrue("in transaction", binding.getSessionContext().hasActiveTransaction());
+        isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction());
+        TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext();
         return executeRetryableWrite(binding, "admin", null, new NoOpFieldNameValidator(),
-                new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformer(), getRetryCommandModifier());
+                                     new BsonDocumentCodec(), getCommandCreator(),
+                writeConcernErrorTransformer(timeoutContext), getRetryCommandModifier(timeoutContext));
     }
 
     @Override
     public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback<Void> callback) {
-        isTrue("in transaction", binding.getSessionContext().hasActiveTransaction());
+        isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction());
+        TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext();
         executeRetryableWriteAsync(binding, "admin", null, new NoOpFieldNameValidator(),
-                new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformerAsync(), getRetryCommandModifier(),
-                errorHandlingCallback(callback, LOGGER));
+                                   new BsonDocumentCodec(), getCommandCreator(),
+                writeConcernErrorTransformerAsync(timeoutContext), getRetryCommandModifier(timeoutContext),
+                                   errorHandlingCallback(callback, LOGGER));
     }
 
     CommandCreator getCommandCreator() {
-        return (serverDescription, connectionDescription) -> {
+        return (operationContext, serverDescription, connectionDescription) -> {
             BsonDocument command = new BsonDocument(getCommandName(), new BsonInt32(1));
             if (!writeConcern.isServerDefault()) {
                 command.put("writeConcern", writeConcern.asDocument());
@@ -84,5 +89,5 @@ CommandCreator getCommandCreator() {
      */
     protected abstract String getCommandName();
 
-    protected abstract Function<BsonDocument, BsonDocument> getRetryCommandModifier();
+    protected abstract Function<BsonDocument, BsonDocument> getRetryCommandModifier(TimeoutContext timeoutContext);
 }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java b/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java
index a9e1a1e8ee6..10b02eda4fe 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java
@@ -22,11 +22,14 @@
 import com.mongodb.WriteConcern;
 import com.mongodb.WriteConcernResult;
 import com.mongodb.bulk.WriteConcernError;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.connection.ProtocolHelper;
+import com.mongodb.lang.Nullable;
 import org.bson.BsonArray;
 import org.bson.BsonDocument;
 import org.bson.BsonString;
 
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import static com.mongodb.internal.operation.CommandOperationHelper.addRetryableWriteErrorLabel;
@@ -41,10 +44,26 @@ public static void appendWriteConcernToCommand(final WriteConcern writeConcern,
             commandDocument.put("writeConcern", writeConcern.asDocument());
         }
     }
+    @Nullable
+    public static WriteConcern cloneWithoutTimeout(@Nullable final WriteConcern writeConcern) {
+        if (writeConcern == null || writeConcern.getWTimeout(TimeUnit.MILLISECONDS) == null) {
+            return writeConcern;
+        }
+
+        WriteConcern mapped;
+        Object w = writeConcern.getWObject();
+        if (w == null) {
+            mapped = WriteConcern.ACKNOWLEDGED;
+        } else {
+            mapped = w instanceof Integer ? new WriteConcern((Integer) w) : new WriteConcern((String) w);
+        }
+        return mapped.withJournal(writeConcern.getJournal());
+    }
 
-    public static void throwOnWriteConcernError(final BsonDocument result, final ServerAddress serverAddress, final int maxWireVersion) {
+    public static void throwOnWriteConcernError(final BsonDocument result, final ServerAddress serverAddress,
+                                                final int maxWireVersion, final TimeoutContext timeoutContext) {
         if (hasWriteConcernError(result)) {
-            MongoException exception = ProtocolHelper.createSpecialException(result, serverAddress, "errmsg");
+            MongoException exception = ProtocolHelper.createSpecialException(result, serverAddress, "errmsg", timeoutContext);
             if (exception == null) {
                 exception = createWriteConcernException(result, serverAddress);
             }
diff --git a/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java
index a2e34985179..1a4fee36e1c 100644
--- a/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java
+++ b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java
@@ -24,6 +24,7 @@
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public interface WriteOperation<T> {
+
     /**
      * General execute which can return anything of type T
      *
diff --git a/driver-core/src/main/com/mongodb/internal/package-info.java b/driver-core/src/main/com/mongodb/internal/package-info.java
index 2f7f9b396cf..e7825fe1292 100644
--- a/driver-core/src/main/com/mongodb/internal/package-info.java
+++ b/driver-core/src/main/com/mongodb/internal/package-info.java
@@ -15,7 +15,6 @@
  */
 
 /**
- * This package contains classes that manage binding to MongoDB servers for various operations.
  */
 
 @NonNullApi
diff --git a/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java b/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java
index ca2023b4d3d..80f88cc08f5 100644
--- a/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java
+++ b/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java
@@ -19,6 +19,10 @@
 import com.mongodb.ClientSessionOptions;
 import com.mongodb.MongoClientException;
 import com.mongodb.ServerAddress;
+import com.mongodb.TransactionOptions;
+import com.mongodb.WriteConcern;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.binding.ReferenceCounted;
 import com.mongodb.lang.Nullable;
 import com.mongodb.session.ClientSession;
@@ -26,10 +30,12 @@
 import org.bson.BsonDocument;
 import org.bson.BsonTimestamp;
 
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static com.mongodb.assertions.Assertions.assertTrue;
 import static com.mongodb.assertions.Assertions.isTrue;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
@@ -48,6 +54,16 @@ public class BaseClientSessionImpl implements ClientSession {
     private ServerAddress pinnedServerAddress;
     private BsonDocument recoveryToken;
     private ReferenceCounted transactionContext;
+    @Nullable
+    private TimeoutContext timeoutContext;
+
+    protected static boolean hasTimeoutMS(@Nullable final TimeoutContext timeoutContext) {
+        return timeoutContext != null && timeoutContext.hasTimeoutMS();
+    }
+
+    protected static boolean hasWTimeoutMS(@Nullable final WriteConcern writeConcern) {
+        return writeConcern != null && writeConcern.getWTimeout(TimeUnit.MILLISECONDS) != null;
+    }
 
     public BaseClientSessionImpl(final ServerSessionPool serverSessionPool, final Object originator, final ClientSessionOptions options) {
         this.serverSessionPool = serverSessionPool;
@@ -193,4 +209,37 @@ public void close() {
             clearTransactionContext();
         }
     }
+
+    @Override
+    @Nullable
+    public TimeoutContext getTimeoutContext() {
+        return timeoutContext;
+    }
+
+    protected void setTimeoutContext(@Nullable final TimeoutContext timeoutContext) {
+        this.timeoutContext = timeoutContext;
+    }
+
+    protected void resetTimeout() {
+        if (timeoutContext != null) {
+            timeoutContext.resetTimeoutIfPresent();
+        }
+    }
+
+    protected TimeoutSettings getTimeoutSettings(final TransactionOptions transactionOptions, final TimeoutSettings timeoutSettings) {
+        Long transactionTimeoutMS = transactionOptions.getTimeout(MILLISECONDS);
+        Long defaultTimeoutMS = getOptions().getDefaultTimeout(MILLISECONDS);
+        Long clientTimeoutMS =  timeoutSettings.getTimeoutMS();
+
+        Long timeoutMS = transactionTimeoutMS != null ? transactionTimeoutMS
+                : defaultTimeoutMS != null ? defaultTimeoutMS : clientTimeoutMS;
+
+        return timeoutSettings
+                .withMaxCommitMS(transactionOptions.getMaxCommitTime(MILLISECONDS))
+                .withTimeout(timeoutMS, MILLISECONDS);
+    }
+
+    protected enum TransactionState {
+        NONE, IN, COMMITTED, ABORTED
+    }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java
index 35268e68f13..6f118f0eddb 100644
--- a/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java
+++ b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java
@@ -22,7 +22,8 @@
 import com.mongodb.connection.ClusterDescription;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.IgnorableRequestContext;
-import com.mongodb.internal.binding.StaticBindingContext;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.Connection;
 import com.mongodb.internal.connection.NoOpSessionContext;
@@ -59,21 +60,26 @@ public class ServerSessionPool {
     private final Cluster cluster;
     private final ServerSessionPool.Clock clock;
     private volatile boolean closed;
-    @Nullable
-    private final ServerApi serverApi;
+    private final OperationContext operationContext;
     private final LongAdder inUseCount = new LongAdder();
 
     interface Clock {
         long millis();
     }
 
-    public ServerSessionPool(final Cluster cluster, @Nullable final ServerApi serverApi) {
-        this(cluster, serverApi, System::currentTimeMillis);
+    public ServerSessionPool(final Cluster cluster, final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) {
+        this(cluster,
+                new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                        new TimeoutContext(timeoutSettings.connectionOnly()), serverApi));
     }
 
-    public ServerSessionPool(final Cluster cluster, @Nullable final ServerApi serverApi, final Clock clock) {
+    public ServerSessionPool(final Cluster cluster, final OperationContext operationContext) {
+        this(cluster, operationContext, System::currentTimeMillis);
+    }
+
+    public ServerSessionPool(final Cluster cluster, final OperationContext operationContext, final Clock clock) {
         this.cluster = cluster;
-        this.serverApi = serverApi;
+        this.operationContext = operationContext;
         this.clock = clock;
     }
 
@@ -128,8 +134,6 @@ private void endClosedSessions() {
 
         Connection connection = null;
         try {
-            StaticBindingContext context = new StaticBindingContext(NoOpSessionContext.INSTANCE, serverApi,
-                    IgnorableRequestContext.INSTANCE, new OperationContext());
             connection = cluster.selectServer(
                     new ServerSelector() {
                         @Override
@@ -149,11 +153,11 @@ public String toString() {
                                     + '}';
                         }
                     },
-                    context.getOperationContext()).getServer().getConnection(context.getOperationContext());
+                    operationContext).getServer().getConnection(operationContext);
 
             connection.command("admin",
                     new BsonDocument("endSessions", new BsonArray(identifiers)), new NoOpFieldNameValidator(),
-                    ReadPreference.primaryPreferred(), new BsonDocumentCodec(), context);
+                    ReadPreference.primaryPreferred(), new BsonDocumentCodec(), operationContext);
         } catch (MongoException e) {
             // ignore exceptions
         } finally {
diff --git a/driver-core/src/main/com/mongodb/internal/time/StartTime.java b/driver-core/src/main/com/mongodb/internal/time/StartTime.java
new file mode 100644
index 00000000000..905af2265d9
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/time/StartTime.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.time;
+
+import java.time.Duration;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A point in time used to track how much time has elapsed. In contrast to a
+ * Timeout, it is guaranteed to not be in the future, and is never infinite.
+ *
+ * @see TimePoint
+ */
+public interface StartTime {
+
+    /**
+     * @see TimePoint#elapsed()
+     */
+    Duration elapsed();
+
+    /**
+     * @see TimePoint#asTimeout()
+     */
+    Timeout asTimeout();
+
+    /**
+     * Returns an {@linkplain Timeout#infinite() infinite} timeout if
+     * {@code timeoutValue} is negative, an expired timeout if
+     * {@code timeoutValue} is 0, otherwise a timeout lasting {@code timeoutValue} in units of {@code timeUnit}.
+     * <p>
+     * Note that some code might ignore a timeout, and attempt to perform
+     * the operation in question at least once.</p>
+     * <p>
+     * Note that the contract of this method is also used in some places to
+     * specify the behavior of methods that accept {@code (long timeout, TimeUnit unit)},
+     * e.g., {@link com.mongodb.internal.connection.ConcurrentPool#get(long, TimeUnit)},
+     * so it cannot be changed without updating those methods.</p>
+     *
+     * @see TimePoint#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)
+     */
+    Timeout timeoutAfterOrInfiniteIfNegative(long timeoutValue, TimeUnit timeUnit);
+
+    /**
+     * @return a StartTime, as of now
+     */
+    static StartTime now() {
+        return TimePoint.at(System.nanoTime());
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/time/TimePoint.java b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java
index 78859802150..102dfb2d609 100644
--- a/driver-core/src/main/com/mongodb/internal/time/TimePoint.java
+++ b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java
@@ -17,74 +17,183 @@
 
 import com.mongodb.annotations.Immutable;
 import com.mongodb.internal.VisibleForTesting;
+import com.mongodb.internal.function.CheckedFunction;
+import com.mongodb.internal.function.CheckedSupplier;
+import com.mongodb.lang.Nullable;
 
 import java.time.Clock;
 import java.time.Duration;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
 
+import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 /**
  * A <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/doc-files/ValueBased.html">value-based</a> class
- * representing a point on a timeline. The origin of this timeline has no known relation to the
- * {@linkplain  Clock#systemUTC() system clock}. The same timeline is used by all {@link TimePoint}s within the same process.
+ * representing a point on a timeline. The origin of this timeline (which is not
+ * exposed) has no relation to the {@linkplain  Clock#systemUTC() system clock}.
+ * The same timeline is used by all {@link TimePoint}s within the same process.
  * <p>
  * Methods operating on a pair of {@link TimePoint}s,
  * for example, {@link #durationSince(TimePoint)}, {@link #compareTo(TimePoint)},
  * or producing a point from another one, for example, {@link #add(Duration)},
- * work correctly only if the duration between the points is not greater than {@link Long#MAX_VALUE} nanoseconds,
- * which is more than 292 years.</p>
+ * work correctly only if the duration between the points is not greater than
+ * {@link Long#MAX_VALUE} nanoseconds, which is more than 292 years.</p>
  * <p>
  * This class is not part of the public API and may be removed or changed at any time.</p>
  */
 @Immutable
-public final class TimePoint implements Comparable<TimePoint> {
-    private final long nanos;
+class TimePoint implements Comparable<TimePoint>, StartTime, Timeout {
+    @Nullable
+    private final Long nanos;
 
-    private TimePoint(final long nanos) {
+    TimePoint(@Nullable final Long nanos) {
         this.nanos = nanos;
     }
 
+    @VisibleForTesting(otherwise = PRIVATE)
+    static TimePoint at(@Nullable final Long nanos) {
+        return new TimePoint(nanos);
+    }
+
+    @VisibleForTesting(otherwise = PRIVATE)
+    long currentNanos() {
+        return System.nanoTime();
+    }
+
     /**
      * Returns the current {@link TimePoint}.
      */
-    public static TimePoint now() {
+    static TimePoint now() {
         return at(System.nanoTime());
     }
 
-    @VisibleForTesting(otherwise = PRIVATE)
-    static TimePoint at(final long nanos) {
-        return new TimePoint(nanos);
+    /**
+     * Returns a {@link TimePoint} infinitely far in the future.
+     */
+    static TimePoint infinite() {
+        return at(null);
+    }
+
+    @Override
+    public Timeout shortenBy(final long amount, final TimeUnit timeUnit) {
+        if (isInfinite()) {
+            return this; // shortening (lengthening) an infinite timeout does nothing
+        }
+        long durationNanos = NANOSECONDS.convert(amount, timeUnit);
+        return TimePoint.at(assertNotNull(nanos) - durationNanos);
+    }
+
+    @Override
+    public <T, E extends Exception> T checkedCall(final TimeUnit timeUnit,
+            final CheckedSupplier<T, E> onInfinite, final CheckedFunction<Long, T, E> onHasRemaining,
+            final CheckedSupplier<T, E> onExpired) throws E {
+        if (this.isInfinite()) {
+            return onInfinite.get();
+        }
+        long remaining = remaining(timeUnit);
+        if (remaining <= 0) {
+            return onExpired.get();
+        } else {
+            return onHasRemaining.apply(remaining);
+        }
     }
 
     /**
-     * The {@link Duration} between this {@link TimePoint} and {@code t}.
-     * A {@linkplain Duration#isNegative() negative} {@link Duration} means that
-     * this {@link TimePoint} is {@linkplain #compareTo(TimePoint) before} {@code t}.
+     * @return true if this timepoint is infinite.
+     */
+    private boolean isInfinite() {
+        return nanos == null;
+    }
+
+    /**
+     * @return this TimePoint, as a Timeout. Convenience for {@link StartTime}
+     */
+    @Override
+    public Timeout asTimeout() {
+        return this;
+    }
+
+    /**
+     * The number of whole time units that remain until this TimePoint
+     * has expired. This should not be used to check for expiry,
+     * but can be used to supply a remaining value, in the finest-grained
+     * TimeUnit available, to some method that may time out.
+     * This method must not be used with infinite TimePoints.
      *
-     * @see #elapsed()
+     * @param unit the time unit
+     * @return the remaining time
+     * @throws AssertionError if the timeout is infinite. Always check whether the
+     * timeout {@link #isInfinite()} before calling.
      */
-    public Duration durationSince(final TimePoint t) {
-        return Duration.ofNanos(nanos - t.nanos);
+    private long remaining(final TimeUnit unit) {
+        if (isInfinite()) {
+            throw new AssertionError("Infinite TimePoints have infinite remaining time");
+        }
+        long remaining = assertNotNull(nanos) - currentNanos();
+        remaining = unit.convert(remaining, NANOSECONDS);
+        return remaining <= 0 ? 0 : remaining;
     }
 
     /**
      * The {@link Duration} between {@link TimePoint#now()} and this {@link TimePoint}.
      * This method is functionally equivalent to {@code TimePoint.now().durationSince(this)}.
+     * Note that the duration will represent fully-elapsed whole units.
      *
+     * @throws AssertionError If this TimePoint is {@linkplain #isInfinite() infinite}.
      * @see #durationSince(TimePoint)
      */
     public Duration elapsed() {
-        return Duration.ofNanos(System.nanoTime() - nanos);
+        if (isInfinite()) {
+            throw new AssertionError("No time can elapse since an infinite TimePoint");
+        }
+        return Duration.ofNanos(currentNanos() - assertNotNull(nanos));
     }
 
+    /**
+     * The {@link Duration} between this {@link TimePoint} and {@code t}.
+     * A {@linkplain Duration#isNegative() negative} {@link Duration} means that
+     * this {@link TimePoint} is {@linkplain #compareTo(TimePoint) before} {@code t}.
+     *
+     * @see #elapsed()
+     */
+    Duration durationSince(final TimePoint t) {
+        if (this.isInfinite()) {
+            throw new AssertionError("this timepoint is infinite, with no duration since");
+        }
+        if (t.isInfinite()) {
+            throw new AssertionError("the other timepoint is infinite, with no duration until");
+        }
+        return Duration.ofNanos(nanos - assertNotNull(t.nanos));
+    }
+
+    /**
+     * @param timeoutValue value; if negative, the result is infinite
+     * @param timeUnit timeUnit
+     * @return a TimePoint that is the given number of timeUnits in the future
+     */
+    @Override
+    public TimePoint timeoutAfterOrInfiniteIfNegative(final long timeoutValue, final TimeUnit timeUnit) {
+        if (timeoutValue < 0) {
+            return infinite();
+        }
+        return this.add(Duration.ofNanos(NANOSECONDS.convert(timeoutValue, timeUnit)));
+    }
+
+
     /**
      * Returns a {@link TimePoint} that is {@code duration} away from this one.
      *
      * @param duration A duration that may also be {@linkplain Duration#isNegative() negative}.
      */
-    public TimePoint add(final Duration duration) {
+    TimePoint add(final Duration duration) {
+        if (isInfinite()) {
+            throw new AssertionError("No time can be added to an infinite TimePoint");
+        }
         long durationNanos = duration.toNanos();
-        return TimePoint.at(nanos + durationNanos);
+        return TimePoint.at(assertNotNull(nanos) + durationNanos);
     }
 
     /**
@@ -94,7 +203,14 @@ public TimePoint add(final Duration duration) {
      */
     @Override
     public int compareTo(final TimePoint t) {
-        return Long.signum(nanos - t.nanos);
+        if (Objects.equals(nanos, t.nanos)) {
+            return 0;
+        } else if (this.isInfinite()) {
+            return 1;
+        } else if (t.isInfinite()) {
+            return -1;
+        }
+        return Long.signum(nanos - assertNotNull(t.nanos));
     }
 
     @Override
@@ -106,18 +222,22 @@ public boolean equals(final Object o) {
             return false;
         }
         final TimePoint timePoint = (TimePoint) o;
-        return nanos == timePoint.nanos;
+        return Objects.equals(nanos, timePoint.nanos);
     }
 
     @Override
     public int hashCode() {
-        return Long.hashCode(nanos);
+        return Objects.hash(nanos);
     }
 
     @Override
     public String toString() {
+        String remainingMs = isInfinite()
+                ? "infinite"
+                : "" + TimeUnit.MILLISECONDS.convert(currentNanos() - assertNotNull(nanos), NANOSECONDS);
         return "TimePoint{"
                 + "nanos=" + nanos
+                + ", remainingMs=" + remainingMs
                 + '}';
     }
 }
diff --git a/driver-core/src/main/com/mongodb/internal/time/Timeout.java b/driver-core/src/main/com/mongodb/internal/time/Timeout.java
index f0d4bbf3ea1..85b92d9fde1 100644
--- a/driver-core/src/main/com/mongodb/internal/time/Timeout.java
+++ b/driver-core/src/main/com/mongodb/internal/time/Timeout.java
@@ -15,245 +15,229 @@
  */
 package com.mongodb.internal.time;
 
-import com.mongodb.annotations.Immutable;
-import com.mongodb.internal.VisibleForTesting;
+import com.mongodb.MongoInterruptedException;
+import com.mongodb.assertions.Assertions;
+import com.mongodb.internal.function.CheckedConsumer;
+import com.mongodb.internal.function.CheckedFunction;
+import com.mongodb.internal.function.CheckedRunnable;
+import com.mongodb.internal.function.CheckedSupplier;
 import com.mongodb.lang.Nullable;
+import org.jetbrains.annotations.NotNull;
 
-import java.util.Objects;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.function.LongConsumer;
+import java.util.function.LongFunction;
+import java.util.function.Supplier;
 
-import static com.mongodb.assertions.Assertions.assertFalse;
-import static com.mongodb.assertions.Assertions.assertNotNull;
-import static com.mongodb.assertions.Assertions.assertTrue;
-import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 /**
- * A <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/doc-files/ValueBased.html">value-based</a> class
- * for tracking timeouts.
- * <p>
- * This class is not part of the public API and may be removed or changed at any time.</p>
+ * A Timeout is a "deadline": a point in time by which something must happen.
+ *
+ * @see TimePoint
  */
-@Immutable
-public final class Timeout {
-    private static final Timeout INFINITE = new Timeout(-1, null);
-    private static final Timeout IMMEDIATE = new Timeout(0, null);
-
-    private final long durationNanos;
-    /**
-     * {@code null} iff {@code this} is {@linkplain #isInfinite() infinite} or {@linkplain #isImmediate() immediate}.
-     */
-    @Nullable
-    private final TimePoint start;
-
-    private Timeout(final long durationNanos, @Nullable final TimePoint start) {
-        this.durationNanos = durationNanos;
-        this.start = start;
-    }
-
-    /**
-     * Converts the specified {@code duration} from {@code unit}s to {@link TimeUnit#NANOSECONDS}
-     * as specified by {@link TimeUnit#toNanos(long)} and then acts identically to {@link #started(long, TimePoint)}.
-     * <p>
-     * Note that the contract of this method is also used in some places to specify the behavior of methods that accept
-     * {@code (long timeout, TimeUnit unit)}, e.g., {@link com.mongodb.internal.connection.ConcurrentPool#get(long, TimeUnit)},
-     * so it cannot be changed without updating those methods.</p>
-     */
-    public static Timeout started(final long duration, final TimeUnit unit, final TimePoint at) {
-        return started(unit.toNanos(duration), assertNotNull(at));
-    }
-
-    /**
-     * Returns an {@linkplain #isInfinite() infinite} timeout if {@code durationNanos} is either negative
-     * or is equal to {@link Long#MAX_VALUE},
-     * an {@linkplain #isImmediate() immediate} timeout if {@code durationNanos} is 0,
-     * otherwise a timeout of {@code durationNanos}.
-     * <p>
-     * Note that the contract of this method is also used in some places to specify the behavior of methods that accept
-     * {@code (long timeout, TimeUnit unit)}, e.g., {@link com.mongodb.internal.connection.ConcurrentPool#get(long, TimeUnit)},
-     * so it cannot be changed without updating those methods.</p>
-     */
-    public static Timeout started(final long durationNanos, final TimePoint at) {
-        if (durationNanos < 0 || durationNanos == Long.MAX_VALUE) {
-            return infinite();
-        } else if (durationNanos == 0) {
-            return immediate();
-        } else {
-            return new Timeout(durationNanos, assertNotNull(at));
-        }
-    }
-
+public interface Timeout {
     /**
-     * This method acts identically to {@link #started(long, TimeUnit, TimePoint)}
-     * with the {@linkplain TimePoint#now() current} {@link TimePoint} passed to it.
+     * @param timeouts the timeouts
+     * @return the instance of the timeout that expires earliest
      */
-    public static Timeout startNow(final long duration, final TimeUnit unit) {
-        return started(duration, unit, TimePoint.now());
+    static Timeout earliest(final Timeout... timeouts) {
+        List<Timeout> list = Arrays.asList(timeouts);
+        list.forEach(v -> {
+            if (!(v instanceof TimePoint)) {
+                throw new AssertionError("Only TimePoints may be compared");
+            }
+        });
+        return Collections.min(list, (a, b) -> {
+            TimePoint tpa = (TimePoint) a;
+            TimePoint tpb = (TimePoint) b;
+            return tpa.compareTo(tpb);
+        });
     }
 
     /**
-     * This method acts identically to {@link #started(long, TimePoint)}
-     * with the {@linkplain TimePoint#now() current} {@link TimePoint} passed to it.
+     * @return an infinite (non-expiring) timeout
      */
-    public static Timeout startNow(final long durationNanos) {
-        return started(durationNanos, TimePoint.now());
+    static Timeout infinite() {
+        return TimePoint.infinite();
     }
 
     /**
-     * @see #started(long, TimePoint)
+     * @param timeout the timeout
+     * @return the provided timeout, or an infinite timeout if {@code null} is provided.
      */
-    public static Timeout infinite() {
-        return INFINITE;
+    static Timeout nullAsInfinite(@Nullable final Timeout timeout) {
+        return timeout == null ? infinite() : timeout;
     }
 
     /**
-     * @see #started(long, TimePoint)
+     * @param duration the non-negative duration, in the specified time unit
+     * @param unit the time unit
+     * @param zeroSemantics what to interpret a 0 duration as (infinite or expired)
+     * @return a timeout that expires in the specified duration after now.
      */
-    public static Timeout immediate() {
-        return IMMEDIATE;
-    }
-
-    /**
-     * Returns 0 or a positive value.
-     * 0 means that the timeout has expired.
-     *
-     * @throws AssertionError If the timeout is {@linkplain #isInfinite() infinite} or {@linkplain #isImmediate() immediate}.
-     */
-    @VisibleForTesting(otherwise = PRIVATE)
-    long remainingNanos(final TimePoint now) {
-        return Math.max(0, durationNanos - now.durationSince(assertNotNull(start)).toNanos());
+    @NotNull
+    static Timeout expiresIn(final long duration, final TimeUnit unit, final ZeroSemantics zeroSemantics) {
+        if (duration < 0) {
+            throw new AssertionError("Timeouts must not be in the past");
+        } else if (duration == 0) {
+            switch (zeroSemantics) {
+                case ZERO_DURATION_MEANS_INFINITE:
+                    return Timeout.infinite();
+                case ZERO_DURATION_MEANS_EXPIRED:
+                    return TimePoint.now();
+                default:
+                    throw Assertions.fail("Unknown enum value");
+            }
+        } else {
+            // duration will never be negative
+            return TimePoint.now().timeoutAfterOrInfiniteIfNegative(duration, unit);
+        }
     }
 
     /**
-     * Returns 0 or a positive value converted to the specified {@code unit}s.
-     * Use {@link #expired(long)} to check if the returned value signifies that a timeout is expired.
-     *
-     * @param unit If not {@link TimeUnit#NANOSECONDS}, then coarsening conversion is done that may result in returning a value
-     * that represents a longer time duration than is actually remaining (this is done to prevent treating a timeout as
-     * {@linkplain #expired(long) expired} when it is not). Consequently, one should specify {@code unit} as small as
-     * practically possible. Such rounding up happens if and only if the remaining time cannot be
-     * represented exactly as an integral number of the {@code unit}s specified. It may result in
-     * {@link #expired()} returning {@code true} and after that (in the happens-before order)
-     * {@link #expired(long) expired}{@code (}{@link #remaining(TimeUnit) remaining(...)}{@code )}
-     * returning {@code false}. If such a discrepancy is observed,
-     * the result of the {@link #expired()} method should be preferred.
+     * This timeout, shortened by the provided amount (it will expire sooner).
      *
-     * @throws AssertionError If the timeout is {@linkplain #isInfinite() infinite}.
-     * @see #remainingOrInfinite(TimeUnit)
-     */
-    public long remaining(final TimeUnit unit) {
-        assertFalse(isInfinite());
-        return isImmediate() ? 0 : convertRoundUp(remainingNanos(TimePoint.now()), unit);
+     * @param amount the amount to shorten by
+     * @param timeUnit the time unit of the amount
+     * @return the shortened timeout
+     */
+    Timeout shortenBy(long amount, TimeUnit timeUnit);
+
+    /**
+     * {@linkplain Condition#awaitNanos(long) Awaits} on the provided
+     * condition. Will {@linkplain Condition#await() await} without a waiting
+     * time if this timeout is infinite.
+     * {@linkplain #onExistsAndExpired(Timeout, Runnable) Expiry} is not
+     * checked by this method; expiration should be checked outside of this method.
+     * @param condition the condition.
+     * @param action supplies the name of the action, for {@link MongoInterruptedException}
+     */
+    default void awaitOn(final Condition condition, final Supplier<String> action) {
+        try {
+            // ignore result, the timeout will track this remaining time
+            //noinspection ResultOfMethodCallIgnored
+            checkedRun(NANOSECONDS,
+                    () -> condition.await(),
+                    (ns) -> condition.awaitNanos(ns),
+                    () -> condition.awaitNanos(0));
+        } catch (InterruptedException e) {
+            throw interruptAndCreateMongoInterruptedException("Interrupted while " + action.get(), e);
+        }
     }
 
     /**
-     * Returns a negative value for {@linkplain #isInfinite() infinite} timeouts,
-     * otherwise behaves identically to {@link #remaining(TimeUnit)}.
-     * Use {@link #expired(long)} to check if the returned value signifies that a timeout is expired.
-     *
-     * @see #remaining(TimeUnit)
-     */
-    public long remainingOrInfinite(final TimeUnit unit) {
-        return isInfinite() ? -1 : remaining(unit);
+     * {@linkplain CountDownLatch#await(long, TimeUnit) Awaits} on the provided
+     * latch. Will {@linkplain CountDownLatch#await() await} without a waiting
+     * time if this timeout is infinite.
+     * {@linkplain #onExistsAndExpired(Timeout, Runnable) Expiry} is not
+     * checked by this method; expiration should be checked outside of this method.
+     * @param latch the latch.
+     * @param action supplies the name of the action, for {@link MongoInterruptedException}
+     */
+    default void awaitOn(final CountDownLatch latch, final Supplier<String> action) {
+        try {
+            // ignore result, the timeout will track this remaining time
+            //noinspection ResultOfMethodCallIgnored
+            checkedRun(NANOSECONDS,
+                    () -> latch.await(),
+                    (ns) -> latch.await(ns, NANOSECONDS),
+                    () -> latch.await(0, NANOSECONDS));
+        } catch (InterruptedException e) {
+            throw interruptAndCreateMongoInterruptedException("Interrupted while " + action.get(), e);
+        }
     }
 
     /**
-     * @see #expired(long)
+     * Call one of 3 possible branches depending on the state of the timeout,
+     * and return the result.
+     * @param timeUnit the time unit of the positive (non-zero) remaining time
+     *                 provided to the {@code onHasRemaining} branch. The underlying
+     *                 nano time is rounded down to the given time unit. If 0,
+     *                 the timeout is considered expired.
+     * @param onInfinite branch to take when the timeout is infinite
+     * @param onHasRemaining branch to take when there is positive remaining
+     *                       time in the specified time unit
+     * @param onExpired branch to take when the timeout is expired
+     * @return the result provided by the branch
+     * @param <T> the type of the result
      */
-    public boolean expired() {
-        return expired(remainingOrInfinite(NANOSECONDS));
+    default <T> T call(final TimeUnit timeUnit,
+            final Supplier<T> onInfinite, final LongFunction<T> onHasRemaining,
+            final Supplier<T> onExpired) {
+        return checkedCall(timeUnit, onInfinite::get, onHasRemaining::apply, onExpired::get);
     }
 
     /**
-     * Returns {@code true} if and only if the {@code remaining} time is 0 (the time unit is irrelevant).
-     *
-     * @see #remaining(TimeUnit)
-     * @see #remainingOrInfinite(TimeUnit)
-     * @see #expired()
+     * Call, but throwing a checked exception.
+     * @see #call(TimeUnit, Supplier, LongFunction, Supplier)
+     * @param <E> the checked exception type
+     * @throws E the checked exception
      */
-    public static boolean expired(final long remaining) {
-        return remaining == 0;
-    }
+    <T, E extends Exception> T checkedCall(TimeUnit timeUnit,
+            CheckedSupplier<T, E> onInfinite, CheckedFunction<Long, T, E> onHasRemaining,
+            CheckedSupplier<T, E> onExpired) throws E;
 
     /**
-     * @return {@code true} if and only if the timeout duration is considered to be infinite.
+     * Run one of 3 possible branches depending on the state of the timeout.
+     * @see #call(TimeUnit, Supplier, LongFunction, Supplier)
      */
-    public boolean isInfinite() {
-        return equals(INFINITE);
+    default void run(final TimeUnit timeUnit,
+            final Runnable onInfinite, final LongConsumer onHasRemaining,
+            final Runnable onExpired) {
+        this.call(timeUnit, () -> {
+            onInfinite.run();
+            return null;
+        }, (t) -> {
+            onHasRemaining.accept(t);
+            return null;
+        }, () -> {
+            onExpired.run();
+            return null;
+        });
     }
 
     /**
-     * @return {@code true} if and only if the timeout duration is 0.
+     * Run, but throwing a checked exception.
+     * @see #checkedCall(TimeUnit, CheckedSupplier, CheckedFunction, CheckedSupplier)
      */
-    public boolean isImmediate() {
-        return equals(IMMEDIATE);
+    default <E extends Exception> void checkedRun(final TimeUnit timeUnit,
+            final CheckedRunnable<E> onInfinite, final CheckedConsumer<Long, E> onHasRemaining,
+            final CheckedRunnable<E> onExpired) throws E {
+        this.checkedCall(timeUnit, () -> {
+            onInfinite.run();
+            return null;
+        }, (t) -> {
+            onHasRemaining.accept(t);
+            return null;
+        }, () -> {
+            onExpired.run();
+            return null;
+        });
     }
 
-    @Override
-    public boolean equals(final Object o) {
-        if (this == o) {
-            return true;
-        }
-        if (o == null || getClass() != o.getClass()) {
-            return false;
-        }
-        Timeout other = (Timeout) o;
-        return durationNanos == other.durationNanos && Objects.equals(start, other.start());
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hash(durationNanos, start);
+    default void onExpired(final Runnable onExpired) {
+        onExistsAndExpired(this, onExpired);
     }
 
-    /**
-     * This method is useful for debugging.
-     *
-     * @see #toUserString()
-     */
-    @Override
-    public String toString() {
-        return "Timeout{"
-                + "durationNanos=" + durationNanos
-                + ", start=" + start
-                + '}';
-    }
-
-    /**
-     * Returns a user-friendly representation. Examples: 1500 ms, infinite, 0 ms (immediate).
-     *
-     * @see #toString()
-     */
-    public String toUserString() {
-        if (isInfinite()) {
-            return "infinite";
-        } else if (isImmediate()) {
-            return "0 ms (immediate)";
-        } else {
-            return convertRoundUp(durationNanos, MILLISECONDS) + " ms";
+    static void onExistsAndExpired(@Nullable final Timeout t, final Runnable onExpired) {
+        if (t == null) {
+            return;
         }
+        t.run(NANOSECONDS,
+                () -> {},
+                (ns) -> {},
+                () -> onExpired.run());
     }
 
-    @VisibleForTesting(otherwise = PRIVATE)
-    long durationNanos() {
-        return durationNanos;
-    }
-
-    @VisibleForTesting(otherwise = PRIVATE)
-    @Nullable
-    TimePoint start() {
-        return start;
-    }
-
-    @VisibleForTesting(otherwise = PRIVATE)
-    static long convertRoundUp(final long nonNegativeNanos, final TimeUnit unit) {
-        assertTrue(nonNegativeNanos >= 0);
-        if (unit == NANOSECONDS) {
-            return nonNegativeNanos;
-        } else {
-            long trimmed = unit.convert(nonNegativeNanos, NANOSECONDS);
-            return NANOSECONDS.convert(trimmed, unit) < nonNegativeNanos ? trimmed + 1 : trimmed;
-        }
+    enum ZeroSemantics {
+        ZERO_DURATION_MEANS_EXPIRED,
+        ZERO_DURATION_MEANS_INFINITE
     }
 }
diff --git a/driver-core/src/main/com/mongodb/session/ClientSession.java b/driver-core/src/main/com/mongodb/session/ClientSession.java
index c6f4c8dcb60..072e6d90905 100644
--- a/driver-core/src/main/com/mongodb/session/ClientSession.java
+++ b/driver-core/src/main/com/mongodb/session/ClientSession.java
@@ -19,6 +19,7 @@
 import com.mongodb.ClientSessionOptions;
 import com.mongodb.ServerAddress;
 import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonTimestamp;
@@ -168,4 +169,18 @@ public interface ClientSession extends Closeable {
 
     @Override
     void close();
+
+    /**
+     * Gets the timeout context to use with this session:
+     *
+     * <ul>
+     *   <li>{@code MongoClientSettings#getTimeoutMS}</li>
+     *   <li>{@code ClientSessionOptions#getDefaultTimeout}</li>
+     * </ul>
+     * <p>For internal use only.</p>
+     * @return the timeout to use
+     * @since 5.2
+     */
+    @Nullable
+    TimeoutContext getTimeoutContext();
 }
diff --git a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java
index 920a2c2ac09..a889856f394 100644
--- a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java
+++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java
@@ -30,16 +30,20 @@
 import com.mongodb.connection.SslSettings;
 import com.mongodb.connection.TransportSettings;
 import com.mongodb.internal.IgnorableRequestContext;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncClusterBinding;
 import com.mongodb.internal.binding.AsyncConnectionSource;
+import com.mongodb.internal.binding.AsyncOperationContextBinding;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.AsyncReadWriteBinding;
 import com.mongodb.internal.binding.AsyncSessionBinding;
 import com.mongodb.internal.binding.AsyncSingleConnectionBinding;
 import com.mongodb.internal.binding.AsyncWriteBinding;
 import com.mongodb.internal.binding.ClusterBinding;
+import com.mongodb.internal.binding.OperationContextBinding;
 import com.mongodb.internal.binding.ReadWriteBinding;
 import com.mongodb.internal.binding.ReferenceCounted;
 import com.mongodb.internal.binding.SessionBinding;
@@ -50,7 +54,10 @@
 import com.mongodb.internal.connection.DefaultClusterFactory;
 import com.mongodb.internal.connection.DefaultInetAddressResolver;
 import com.mongodb.internal.connection.InternalConnectionPoolSettings;
+import com.mongodb.internal.connection.InternalOperationContextFactory;
 import com.mongodb.internal.connection.MongoCredentialWithCache;
+import com.mongodb.internal.connection.OperationContext;
+import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext;
 import com.mongodb.internal.connection.SocketStreamFactory;
 import com.mongodb.internal.connection.StreamFactory;
 import com.mongodb.internal.connection.StreamFactoryFactory;
@@ -94,9 +101,10 @@
 import static com.mongodb.internal.connection.ClusterDescriptionHelper.getSecondaries;
 import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
 import static java.lang.String.format;
-import static java.lang.Thread.sleep;
 import static java.util.Arrays.asList;
 import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assume.assumeThat;
@@ -118,7 +126,20 @@ public final class ClusterFixture {
     private static final String DEFAULT_DATABASE_NAME = "JavaDriverTest";
     private static final int COMMAND_NOT_FOUND_ERROR_CODE = 59;
     public static final long TIMEOUT = 60L;
-    public static final Duration TIMEOUT_DURATION = Duration.ofMinutes(1);
+    public static final Duration TIMEOUT_DURATION = Duration.ofSeconds(TIMEOUT);
+
+    public static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(5));
+    public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_TIMEOUT = TIMEOUT_SETTINGS.withTimeout(TIMEOUT, SECONDS);
+    public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT = TIMEOUT_SETTINGS.withTimeout(0L, MILLISECONDS);
+    public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_TIME = TIMEOUT_SETTINGS.withMaxTimeMS(100);
+    public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME = TIMEOUT_SETTINGS.withMaxAwaitTimeMS(101);
+    public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME =
+            TIMEOUT_SETTINGS.withMaxTimeAndMaxAwaitTimeMS(101, 1001);
+
+    public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS =
+            TIMEOUT_SETTINGS.withMaxTimeAndMaxAwaitTimeMS(101, 1001).withMaxCommitMS(999L);
+    public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_COMMIT = TIMEOUT_SETTINGS.withMaxCommitMS(999L);
+
     public static final String LEGACY_HELLO = "isMaster";
 
     private static ConnectionString connectionString;
@@ -164,12 +185,28 @@ public static ServerVersion getServerVersion() {
         if (serverVersion == null) {
             serverVersion = getVersion(new CommandReadOperation<>("admin",
                     new BsonDocument("buildInfo", new BsonInt32(1)), new BsonDocumentCodec())
-                    .execute(new ClusterBinding(getCluster(), ReadPreference.nearest(), ReadConcern.DEFAULT, getServerApi(),
-                            IgnorableRequestContext.INSTANCE)));
+                    .execute(new ClusterBinding(getCluster(), ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT)));
         }
         return serverVersion;
     }
 
+    public static final OperationContext OPERATION_CONTEXT = new OperationContext(
+            IgnorableRequestContext.INSTANCE,
+            new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT),
+            new TimeoutContext(TIMEOUT_SETTINGS),
+            getServerApi());
+
+    public static final InternalOperationContextFactory OPERATION_CONTEXT_FACTORY =
+            new InternalOperationContextFactory(TIMEOUT_SETTINGS, getServerApi());
+
+    public static OperationContext createOperationContext(final TimeoutSettings timeoutSettings) {
+        return new OperationContext(
+                IgnorableRequestContext.INSTANCE,
+                new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT),
+                new TimeoutContext(timeoutSettings),
+                getServerApi());
+    }
+
     private static ServerVersion getVersion(final BsonDocument buildInfoResult) {
         List<BsonValue> versionArray = buildInfoResult.getArray("versionArray").subList(0, 3);
 
@@ -208,7 +245,8 @@ public static boolean hasEncryptionTestsEnabled() {
     }
 
     public static Document getServerStatus() {
-        return new CommandReadOperation<>("admin", new BsonDocument("serverStatus", new BsonInt32(1)), new DocumentCodec())
+        return new CommandReadOperation<>("admin", new BsonDocument("serverStatus", new BsonInt32(1)),
+                new DocumentCodec())
                 .execute(getBinding());
     }
 
@@ -272,8 +310,8 @@ public static synchronized ConnectionString getConnectionString() {
                 new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), SslSettings.builder().build()));
         try {
             BsonDocument helloResult = new CommandReadOperation<>("admin",
-                    new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()).execute(new ClusterBinding(cluster,
-                    ReadPreference.nearest(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE));
+                    new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec())
+                    .execute(new ClusterBinding(cluster, ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT));
             if (helloResult.containsKey("setName")) {
                 connectionString = new ConnectionString(DEFAULT_URI + "/?replicaSet="
                         + helloResult.getString("setName").getValue());
@@ -316,29 +354,49 @@ private static ConnectionString getConnectionStringFromSystemProperty(final Stri
         return null;
     }
 
+    public static ReadWriteBinding getBinding() {
+        return getBinding(getCluster());
+    }
+
     public static ReadWriteBinding getBinding(final Cluster cluster) {
-        return new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE);
+        return new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT);
     }
 
-    public static ReadWriteBinding getBinding() {
-        return getBinding(getCluster(), ReadPreference.primary());
+    public static ReadWriteBinding getBinding(final TimeoutSettings timeoutSettings) {
+        return getBinding(getCluster(), ReadPreference.primary(), createNewOperationContext(timeoutSettings));
+    }
+
+    public static ReadWriteBinding getBinding(final OperationContext operationContext) {
+       return getBinding(getCluster(), ReadPreference.primary(), operationContext);
     }
 
     public static ReadWriteBinding getBinding(final ReadPreference readPreference) {
-        return getBinding(getCluster(), readPreference);
+        return getBinding(getCluster(), readPreference, OPERATION_CONTEXT);
+    }
+
+    public static OperationContext createNewOperationContext(final TimeoutSettings timeoutSettings) {
+        return new OperationContext(OPERATION_CONTEXT.getId(),
+                OPERATION_CONTEXT.getRequestContext(),
+                OPERATION_CONTEXT.getSessionContext(),
+                new TimeoutContext(timeoutSettings),
+                OPERATION_CONTEXT.getServerApi());
     }
 
-    private static ReadWriteBinding getBinding(final Cluster cluster, final ReadPreference readPreference) {
+    private static ReadWriteBinding getBinding(final Cluster cluster,
+            final ReadPreference readPreference,
+            final OperationContext operationContext) {
         if (!BINDING_MAP.containsKey(readPreference)) {
-            ReadWriteBinding binding = new SessionBinding(new ClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, getServerApi(),
-                    IgnorableRequestContext.INSTANCE));
+            ReadWriteBinding binding = new SessionBinding(new ClusterBinding(cluster, readPreference, ReadConcern.DEFAULT,
+                    operationContext));
             BINDING_MAP.put(readPreference, binding);
         }
-        return BINDING_MAP.get(readPreference);
+        ReadWriteBinding readWriteBinding = BINDING_MAP.get(readPreference);
+        return new OperationContextBinding(readWriteBinding,
+                operationContext.withSessionContext(readWriteBinding.getOperationContext().getSessionContext()));
     }
 
     public static SingleConnectionBinding getSingleConnectionBinding() {
-        return new SingleConnectionBinding(getCluster(), ReadPreference.primary(), getServerApi());
+        return new SingleConnectionBinding(getCluster(), ReadPreference.primary(), OPERATION_CONTEXT);
     }
 
     public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding() {
@@ -346,29 +404,41 @@ public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding() {
     }
 
     public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding(final Cluster cluster) {
-        return new AsyncSingleConnectionBinding(cluster, 20, SECONDS, getServerApi());
+        return new AsyncSingleConnectionBinding(cluster, ReadPreference.primary(), OPERATION_CONTEXT);
     }
 
     public static AsyncReadWriteBinding getAsyncBinding(final Cluster cluster) {
-        return new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(),
-                IgnorableRequestContext.INSTANCE);
+        return new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT);
     }
 
     public static AsyncReadWriteBinding getAsyncBinding() {
-        return getAsyncBinding(getAsyncCluster(), ReadPreference.primary());
+        return getAsyncBinding(getAsyncCluster(), ReadPreference.primary(), OPERATION_CONTEXT);
+    }
+
+    public static AsyncReadWriteBinding getAsyncBinding(final TimeoutSettings timeoutSettings) {
+        return getAsyncBinding(createNewOperationContext(timeoutSettings));
+    }
+
+    public static AsyncReadWriteBinding getAsyncBinding(final OperationContext operationContext) {
+        return getAsyncBinding(getAsyncCluster(), ReadPreference.primary(), operationContext);
     }
 
     public static AsyncReadWriteBinding getAsyncBinding(final ReadPreference readPreference) {
-        return getAsyncBinding(getAsyncCluster(), readPreference);
+        return getAsyncBinding(getAsyncCluster(), readPreference, OPERATION_CONTEXT);
     }
 
-    public static AsyncReadWriteBinding getAsyncBinding(final Cluster cluster, final ReadPreference readPreference) {
+    public static AsyncReadWriteBinding getAsyncBinding(
+            final Cluster cluster,
+            final ReadPreference readPreference,
+            final OperationContext operationContext) {
         if (!ASYNC_BINDING_MAP.containsKey(readPreference)) {
             AsyncReadWriteBinding binding = new AsyncSessionBinding(new AsyncClusterBinding(cluster, readPreference, ReadConcern.DEFAULT,
-                    getServerApi(), IgnorableRequestContext.INSTANCE));
+                    operationContext));
             ASYNC_BINDING_MAP.put(readPreference, binding);
         }
-        return ASYNC_BINDING_MAP.get(readPreference);
+        AsyncReadWriteBinding readWriteBinding = ASYNC_BINDING_MAP.get(readPreference);
+        return new AsyncOperationContextBinding(readWriteBinding,
+                operationContext.withSessionContext(readWriteBinding.getOperationContext().getSessionContext()));
     }
 
     public static synchronized Cluster getCluster() {
@@ -402,16 +472,17 @@ private static Cluster createCluster(final MongoCredential credential, final Str
         return new DefaultClusterFactory().createCluster(ClusterSettings.builder().hosts(asList(getPrimary())).build(),
                 ServerSettings.builder().build(),
                 ConnectionPoolSettings.builder().maxSize(1).build(), InternalConnectionPoolSettings.builder().build(),
-                streamFactory, streamFactory, credential, LoggerSettings.builder().build(), null, null, null,
-                Collections.emptyList(), getServerApi(), null);
+                TIMEOUT_SETTINGS.connectionOnly(), streamFactory, TIMEOUT_SETTINGS.connectionOnly(), streamFactory, credential,
+                LoggerSettings.builder().build(), null, null, null, Collections.emptyList(), getServerApi(), null);
     }
 
     private static Cluster createCluster(final ConnectionString connectionString, final StreamFactory streamFactory) {
-        return new DefaultClusterFactory().createCluster(ClusterSettings.builder().applyConnectionString(connectionString).build(),
-                ServerSettings.builder().build(),
-                ConnectionPoolSettings.builder().applyConnectionString(connectionString).build(),
-                InternalConnectionPoolSettings.builder().build(),
-                streamFactory,
+        MongoClientSettings mongoClientSettings = MongoClientSettings.builder().applyConnectionString(connectionString).build();
+
+        return new DefaultClusterFactory().createCluster(mongoClientSettings.getClusterSettings(),
+                mongoClientSettings.getServerSettings(), mongoClientSettings.getConnectionPoolSettings(),
+                InternalConnectionPoolSettings.builder().build(), TimeoutSettings.create(mongoClientSettings).connectionOnly(),
+                streamFactory, TimeoutSettings.createHeartbeatSettings(mongoClientSettings).connectionOnly(),
                 new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().readTimeout(5, SECONDS).build(),
                         getSslSettings(connectionString)),
                 connectionString.getCredential(),
@@ -475,32 +546,40 @@ public static SslSettings getSslSettings(final ConnectionString connectionString
         return SslSettings.builder().applyConnectionString(connectionString).build();
     }
 
-    public static ServerAddress getPrimary() {
+    public static ServerDescription getPrimaryServerDescription() {
         List<ServerDescription> serverDescriptions = getPrimaries(getClusterDescription(getCluster()));
         while (serverDescriptions.isEmpty()) {
-            try {
-                sleep(100);
-            } catch (InterruptedException e) {
-                throw new RuntimeException(e);
-            }
+            sleep(100);
             serverDescriptions = getPrimaries(getClusterDescription(getCluster()));
         }
-        return serverDescriptions.get(0).getAddress();
+        return serverDescriptions.get(0);
+    }
+
+    public static ServerAddress getPrimary() {
+        return getPrimaryServerDescription().getAddress();
+    }
+
+    public static long getPrimaryRTT() {
+        return MILLISECONDS.convert(getPrimaryServerDescription().getRoundTripTimeNanos(), NANOSECONDS);
     }
 
     public static ServerAddress getSecondary() {
         List<ServerDescription> serverDescriptions = getSecondaries(getClusterDescription(getCluster()));
         while (serverDescriptions.isEmpty()) {
-            try {
-                sleep(100);
-            } catch (InterruptedException e) {
-                throw new RuntimeException(e);
-            }
+            sleep(100);
             serverDescriptions = getSecondaries(getClusterDescription(getCluster()));
         }
         return serverDescriptions.get(0).getAddress();
     }
 
+    public static void sleep(final int sleepMS) {
+        try {
+            Thread.sleep(sleepMS);
+        } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
     @Nullable
     public static MongoCredential getCredential() {
         return getConnectionString().getCredential();
@@ -518,8 +597,7 @@ public static MongoCredentialWithCache getCredentialWithCache() {
     public static BsonDocument getServerParameters() {
         if (serverParameters == null) {
             serverParameters = new CommandReadOperation<>("admin",
-                    new BsonDocument("getParameter", new BsonString("*")),
-                    new BsonDocumentCodec())
+                    new BsonDocument("getParameter", new BsonString("*")), new BsonDocumentCodec())
                     .execute(getBinding());
         }
         return serverParameters;
@@ -599,7 +677,8 @@ public static void disableFailPoint(final String failPoint) {
             BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString(failPoint))
                     .append("mode", new BsonString("off"));
             try {
-                new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()).execute(getBinding());
+                new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec())
+                        .execute(getBinding());
             } catch (MongoCommandException e) {
                 // ignore
             }
@@ -743,7 +822,7 @@ public static int getReferenceCountAfterTimeout(final ReferenceCounted reference
                 if (System.currentTimeMillis() > startTime + TIMEOUT_DURATION.toMillis()) {
                     return count;
                 }
-                sleep(10);
+                Thread.sleep(10);
                 count = referenceCounted.getCount();
             } catch (InterruptedException e) {
                 throw interruptAndCreateMongoInterruptedException("Interrupted", e);
@@ -755,4 +834,11 @@ public static int getReferenceCountAfterTimeout(final ReferenceCounted reference
     public static ClusterSettings.Builder setDirectConnection(final ClusterSettings.Builder builder) {
         return builder.mode(ClusterConnectionMode.SINGLE).hosts(singletonList(getPrimary()));
     }
+
+    public static int applyTimeoutMultiplierForServerless(final int timeoutMs) {
+        if (ClusterFixture.isServerlessTest()) {
+            return timeoutMs * 2;
+        }
+        return timeoutMs;
+    }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy
index 372fdd4b82d..adf707b9cb7 100644
--- a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy
@@ -45,7 +45,6 @@ import com.mongodb.internal.binding.WriteBinding
 import com.mongodb.internal.bulk.InsertRequest
 import com.mongodb.internal.connection.AsyncConnection
 import com.mongodb.internal.connection.Connection
-import com.mongodb.internal.connection.OperationContext
 import com.mongodb.internal.connection.ServerHelper
 import com.mongodb.internal.connection.SplittablePayload
 import com.mongodb.internal.operation.AsyncReadOperation
@@ -64,6 +63,7 @@ import spock.lang.Specification
 
 import java.util.concurrent.TimeUnit
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.TIMEOUT
 import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget
 import static com.mongodb.ClusterFixture.executeAsync
@@ -109,13 +109,14 @@ class OperationFunctionalSpecification extends Specification {
     }
 
     void acknowledgeWrite(final SingleConnectionBinding binding) {
-        new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false).execute(binding)
+        new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true,
+                ACKNOWLEDGED, false).execute(binding)
         binding.release()
     }
 
     void acknowledgeWrite(final AsyncSingleConnectionBinding binding) {
-        executeAsync(new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false),
-                binding)
+        executeAsync(new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())],
+                true, ACKNOWLEDGED, false), binding)
         binding.release()
     }
 
@@ -142,7 +143,9 @@ class OperationFunctionalSpecification extends Specification {
 
     def executeWithSession(operation, boolean async) {
         def executor = async ? ClusterFixture.&executeAsync : ClusterFixture.&executeSync
-        def binding = async ? new AsyncSessionBinding(getAsyncBinding()) : new SessionBinding(getBinding())
+        def binding = async ?
+                new AsyncSessionBinding(getAsyncBinding())
+                : new SessionBinding(getBinding())
         executor(operation, binding)
     }
 
@@ -270,7 +273,11 @@ class OperationFunctionalSpecification extends Specification {
                           BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false,
                           ReadPreference readPreference=ReadPreference.primary(), Boolean retryable = false,
                           ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) {
-        def operationContext = new OperationContext()
+        def operationContext = OPERATION_CONTEXT
+                .withSessionContext(Stub(SessionContext) {
+                    hasActiveTransaction() >> activeTransaction
+                    getReadConcern() >> readConcern
+                })
         def connection = Mock(Connection) {
             _ * getDescription() >> Stub(ConnectionDescription) {
                 getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion)
@@ -283,7 +290,6 @@ class OperationFunctionalSpecification extends Specification {
                 connection
             }
             getOperationContext() >> operationContext
-            getServerApi() >> null
             getReadPreference() >> readPreference
             getServerDescription() >> {
                 def builder = ServerDescription.builder().address(Stub(ServerAddress)).state(ServerConnectionState.CONNECTED)
@@ -296,23 +302,11 @@ class OperationFunctionalSpecification extends Specification {
         def readBinding = Stub(ReadBinding) {
             getReadConnectionSource(*_) >> connectionSource
             getReadPreference() >> readPreference
-            getServerApi() >> null
             getOperationContext() >> operationContext
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> activeTransaction
-                getReadConcern() >> readConcern
-            }
         }
         def writeBinding = Stub(WriteBinding) {
             getWriteConnectionSource() >> connectionSource
-            getServerApi() >> null
             getOperationContext() >> operationContext
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> activeTransaction
-                getReadConcern() >> readConcern
-            }
         }
 
         if (retryable) {
@@ -356,7 +350,11 @@ class OperationFunctionalSpecification extends Specification {
                            Boolean checkCommand = true, BsonDocument expectedCommand = null, Boolean checkSecondaryOk = false,
                            ReadPreference readPreference = ReadPreference.primary(), Boolean retryable = false,
                            ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) {
-        def operationContext = new OperationContext()
+        def operationContext = OPERATION_CONTEXT
+                .withSessionContext(Stub(SessionContext) {
+                    hasActiveTransaction() >> activeTransaction
+                    getReadConcern() >> readConcern
+                })
         def connection = Mock(AsyncConnection) {
             _ * getDescription() >> Stub(ConnectionDescription) {
                 getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion)
@@ -367,7 +365,6 @@ class OperationFunctionalSpecification extends Specification {
         def connectionSource = Stub(AsyncConnectionSource) {
             getConnection(_) >> { it[0].onResult(connection, null) }
             getReadPreference() >> readPreference
-            getServerApi() >> null
             getOperationContext() >> operationContext
             getServerDescription() >> {
                 def builder = ServerDescription.builder().address(Stub(ServerAddress)).state(ServerConnectionState.CONNECTED)
@@ -380,23 +377,11 @@ class OperationFunctionalSpecification extends Specification {
         def readBinding = Stub(AsyncReadBinding) {
             getReadConnectionSource(*_) >> { it.last().onResult(connectionSource, null) }
             getReadPreference() >> readPreference
-            getServerApi() >> null
             getOperationContext() >> operationContext
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> activeTransaction
-                getReadConcern() >> readConcern
-            }
         }
         def writeBinding = Stub(AsyncWriteBinding) {
             getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
-            getServerApi() >> null
             getOperationContext() >> operationContext
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> activeTransaction
-                getReadConcern() >> readConcern
-            }
         }
         def callback = new FutureResultCallback()
 
@@ -458,6 +443,13 @@ class OperationFunctionalSpecification extends Specification {
             }
         }
 
+        def operationContext = OPERATION_CONTEXT.withSessionContext(
+                Stub(SessionContext) {
+                    hasSession() >> true
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.DEFAULT
+                })
+
         def connectionSource = Stub(ConnectionSource) {
             getConnection() >> {
                 if (serverVersions.isEmpty()){
@@ -466,16 +458,11 @@ class OperationFunctionalSpecification extends Specification {
                     connection
                 }
             }
-            getServerApi() >> null
+            getOperationContext() >> operationContext
         }
         def writeBinding = Stub(WriteBinding) {
             getWriteConnectionSource() >> connectionSource
-            getServerApi() >> null
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> false
-                getReadConcern() >> ReadConcern.DEFAULT
-            }
+            getOperationContext() >> operationContext
         }
 
         1 * connection.command(*_) >> {
@@ -499,8 +486,14 @@ class OperationFunctionalSpecification extends Specification {
             }
         }
 
+        def operationContext = OPERATION_CONTEXT.withSessionContext(
+                Stub(SessionContext) {
+                    hasSession() >> true
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.DEFAULT
+                })
+
         def connectionSource = Stub(AsyncConnectionSource) {
-            getServerApi() >> null
             getConnection(_) >> {
                 if (serverVersions.isEmpty()) {
                     it[0].onResult(null,
@@ -509,16 +502,12 @@ class OperationFunctionalSpecification extends Specification {
                     it[0].onResult(connection, null)
                 }
             }
+            getOperationContext() >> operationContext
         }
 
         def writeBinding = Stub(AsyncWriteBinding) {
-            getServerApi() >> null
             getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> false
-                getReadConcern() >> ReadConcern.DEFAULT
-            }
+            getOperationContext() >> operationContext
         }
         def callback = new FutureResultCallback()
 
diff --git a/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java
index 4c045001b10..23be2ccc3ab 100644
--- a/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java
+++ b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java
@@ -29,6 +29,7 @@
 import org.bson.BsonInt32;
 import org.bson.BsonInt64;
 import org.bson.BsonString;
+import org.bson.BsonType;
 import org.bson.BsonValue;
 import org.bson.codecs.BsonDocumentCodec;
 import org.bson.codecs.BsonValueCodecProvider;
@@ -117,11 +118,11 @@ static boolean isWriteCommand(final String commandName) {
         return asList("insert", "update", "delete").contains(commandName);
     }
 
-    public static void assertEventsEquality(final List<CommandEvent> expectedEvents, final List<CommandEvent> events) {
+    public static void assertEventsEquality(final List<CommandEvent> expectedEvents, final List<? extends CommandEvent> events) {
         assertEventsEquality(expectedEvents, events, null);
     }
 
-    public static void assertEventsEquality(final List<CommandEvent> expectedEvents, final List<CommandEvent> events,
+    public static void assertEventsEquality(final List<CommandEvent> expectedEvents, final List<? extends CommandEvent> events,
                                             @Nullable final Map<String, BsonDocument> lsidMap) {
         assertEquals(expectedEvents.size(), events.size());
 
@@ -221,25 +222,33 @@ private static CommandSucceededEvent massageActualCommandSucceededEvent(final Co
     private static CommandStartedEvent massageActualCommandStartedEvent(final CommandStartedEvent event,
                                                                         @Nullable final Map<String, BsonDocument> lsidMap,
                                                                         final CommandStartedEvent expectedCommandStartedEvent) {
-        BsonDocument command = getWritableCloneOfCommand(event.getCommand());
+        BsonDocument actualCommand = getWritableCloneOfCommand(event.getCommand());
+        BsonDocument expectedCommand = expectedCommandStartedEvent.getCommand();
 
-        massageCommand(event, command);
+        massageCommand(event, actualCommand);
 
-        if (command.containsKey("readConcern") && (command.getDocument("readConcern").containsKey("afterClusterTime"))) {
-            command.getDocument("readConcern").put("afterClusterTime", new BsonInt32(42));
+        if (actualCommand.containsKey("readConcern") && (actualCommand.getDocument("readConcern").containsKey("afterClusterTime"))) {
+            actualCommand.getDocument("readConcern").put("afterClusterTime", new BsonInt32(42));
         }
-        // Tests expect maxTimeMS to be int32, but Java API requires maxTime to be a long.  This massage seems preferable to casting
-        if (command.containsKey("maxTimeMS")) {
-            command.put("maxTimeMS", new BsonInt32(command.getNumber("maxTimeMS").intValue()));
+        if (actualCommand.containsKey("maxTimeMS") && !isExpectedMaxTimeMsLong(expectedCommand)) {
+            // Some tests expect maxTimeMS to be int32, but Java API requires maxTime to be a long. This massage seems preferable to casting
+            actualCommand.put("maxTimeMS", new BsonInt32(actualCommand.getNumber("maxTimeMS").intValue()));
         }
         // Tests do not expect the "ns" field in a result after running createIndex.
-        if (command.containsKey("createIndexes") && command.containsKey("indexes")) {
-            massageCommandIndexes(command.getArray("indexes"));
+        if (actualCommand.containsKey("createIndexes") && actualCommand.containsKey("indexes")) {
+            massageCommandIndexes(actualCommand.getArray("indexes"));
         }
-        massageActualCommand(command, expectedCommandStartedEvent.getCommand());
+        massageActualCommand(actualCommand, expectedCommand);
 
         return new CommandStartedEvent(event.getRequestContext(), event.getOperationId(), event.getRequestId(),
-                event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), command);
+                event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), actualCommand);
+    }
+
+    private static boolean isExpectedMaxTimeMsLong(final BsonDocument expectedCommand) {
+        if (expectedCommand.containsKey("maxTimeMS")) {
+            return expectedCommand.get("maxTimeMS").getBsonType() == BsonType.INT64;
+        }
+        return false;
     }
 
     private static void massageCommandIndexes(final BsonArray indexes) {
diff --git a/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java b/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java
index 8ebb1204ba3..119babf8875 100644
--- a/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java
+++ b/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java
@@ -21,7 +21,10 @@
 import org.bson.BsonType;
 import org.bson.BsonValue;
 
-import static org.junit.Assert.assertEquals;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static java.util.Collections.singletonList;
 
 public final class CrudTestHelper {
 
@@ -32,15 +35,12 @@ public static void replaceTypeAssertionWithActual(final BsonDocument expected, f
                 BsonDocument valueDocument = value.asDocument();
                 BsonValue actualValue = actual.get(key);
                 if (valueDocument.size() == 1 && valueDocument.getFirstKey().equals("$$type")) {
-                    String type = valueDocument.getString("$$type").getValue();
-                    if (type.equals("binData")) {
-                        assertEquals(BsonType.BINARY, actualValue.getBsonType());
-                        expected.put(key, actualValue);
-                    } else if (type.equals("long")) {
-                        assertEquals(BsonType.INT64, actualValue.getBsonType());
+                    List<String> types = getExpectedTypes(valueDocument.get("$$type"));
+                    String actualType = asTypeString(actualValue.getBsonType());
+                    if (types.contains(actualType)) {
                         expected.put(key, actualValue);
                     } else {
-                        throw new UnsupportedOperationException("Unsupported type: " + type);
+                        throw new UnsupportedOperationException("Unsupported type: " + actualValue);
                     }
                 } else if (actualValue != null && actualValue.isDocument()) {
                     replaceTypeAssertionWithActual(valueDocument, actualValue.asDocument());
@@ -53,6 +53,31 @@ public static void replaceTypeAssertionWithActual(final BsonDocument expected, f
         }
     }
 
+    private static String asTypeString(final BsonType bsonType) {
+        switch (bsonType) {
+            case BINARY:
+                return "binData";
+            case INT32:
+                return "int";
+            case INT64:
+                return "long";
+            default:
+                throw new UnsupportedOperationException("Unsupported bson type conversion to string: " + bsonType);
+        }
+    }
+
+    private static List<String> getExpectedTypes(final BsonValue expectedTypes) {
+        List<String> types;
+        if (expectedTypes.isString()) {
+            types = singletonList(expectedTypes.asString().getValue());
+        } else if (expectedTypes.isArray()) {
+            types = expectedTypes.asArray().stream().map(type -> type.asString().getValue()).collect(Collectors.toList());
+        } else {
+            throw new UnsupportedOperationException("Unsupported type for $$type value");
+        }
+        return types;
+    }
+
     private static void replaceTypeAssertionWithActual(final BsonArray expected, final BsonArray actual) {
         for (int i = 0; i < expected.size(); i++) {
             BsonValue value = expected.get(i);
@@ -63,6 +88,7 @@ private static void replaceTypeAssertionWithActual(final BsonArray expected, fin
             }
         }
     }
+
     private CrudTestHelper() {
     }
 
diff --git a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java
index 01ed641e4b1..1cc3904749d 100644
--- a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java
+++ b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java
@@ -17,9 +17,9 @@
 
 import com.mongodb.ReadPreference;
 import com.mongodb.connection.ConnectionDescription;
-import com.mongodb.internal.binding.BindingContext;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.SplittablePayload;
 import org.bson.BsonDocument;
 import org.bson.FieldNameValidator;
@@ -56,19 +56,19 @@ public ConnectionDescription getDescription() {
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
             final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-            final BindingContext context) {
+            final OperationContext operationContext) {
         SupplyingCallback<T> callback = new SupplyingCallback<>();
-        wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, context, callback);
+        wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, callback);
         return callback.get();
     }
 
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
             final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-            final BindingContext context, final boolean responseExpected, final SplittablePayload payload,
+            final OperationContext operationContext, final boolean responseExpected, final SplittablePayload payload,
             final FieldNameValidator payloadFieldNameValidator) {
         SupplyingCallback<T> callback = new SupplyingCallback<>();
-        wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, context,
+        wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext,
                 responseExpected, payload, payloadFieldNameValidator, callback);
         return callback.get();
     }
diff --git a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java
index 9e17843d9fe..e297726d325 100644
--- a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java
+++ b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java
@@ -85,7 +85,8 @@ public CollectionHelper(final Codec<T> codec, final MongoNamespace namespace) {
     }
 
     public T hello() {
-        return new CommandReadOperation<>("admin", BsonDocument.parse("{isMaster: 1}"), codec).execute(getBinding());
+        return new CommandReadOperation<>("admin", BsonDocument.parse("{isMaster: 1}"), codec)
+                .execute(getBinding());
     }
 
     public static void drop(final MongoNamespace namespace) {
@@ -160,9 +161,27 @@ public void create(final String collectionName, final CreateCollectionOptions op
         create(collectionName, options, WriteConcern.ACKNOWLEDGED);
     }
 
+    public void create(final WriteConcern writeConcern, final BsonDocument createOptions) {
+        CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions();
+        for (String option : createOptions.keySet()) {
+            switch (option) {
+                case "capped":
+                    createCollectionOptions.capped(createOptions.getBoolean("capped").getValue());
+                    break;
+                case "size":
+                    createCollectionOptions.sizeInBytes(createOptions.getNumber("size").longValue());
+                    break;
+                default:
+                    throw new UnsupportedOperationException("Unsupported create collection option: " + option);
+            }
+        }
+        create(namespace.getCollectionName(), createCollectionOptions, writeConcern);
+    }
+
     public void create(final String collectionName, final CreateCollectionOptions options, final WriteConcern writeConcern) {
         drop(namespace, writeConcern);
-        CreateCollectionOperation operation = new CreateCollectionOperation(namespace.getDatabaseName(), collectionName, writeConcern)
+        CreateCollectionOperation operation = new CreateCollectionOperation(namespace.getDatabaseName(), collectionName,
+                                                                            writeConcern)
                 .capped(options.isCapped())
                 .sizeInBytes(options.getSizeInBytes())
                 .maxDocuments(options.getMaxDocuments());
@@ -217,6 +236,10 @@ public void insertDocuments(final BsonDocument... documents) {
         insertDocuments(asList(documents));
     }
 
+    public void insertDocuments(final WriteConcern writeConcern, final BsonDocument... documents) {
+        insertDocuments(asList(documents), writeConcern);
+    }
+
     public void insertDocuments(final List<BsonDocument> documents) {
         insertDocuments(documents, getBinding());
     }
@@ -301,18 +324,18 @@ public void updateOne(final Bson filter, final Bson update, final boolean isUpse
 
     public void replaceOne(final Bson filter, final Bson update, final boolean isUpsert) {
         new MixedBulkWriteOperation(namespace,
-                singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry),
+                                    singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry),
                         update.toBsonDocument(Document.class, registry),
                         WriteRequest.Type.REPLACE)
                         .upsert(isUpsert)),
-                true, WriteConcern.ACKNOWLEDGED, false)
+                                    true, WriteConcern.ACKNOWLEDGED, false)
                 .execute(getBinding());
     }
 
     public void deleteOne(final Bson filter) {
         new MixedBulkWriteOperation(namespace,
-                singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, registry))),
-                true, WriteConcern.ACKNOWLEDGED, false)
+                                    singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, registry))),
+                                    true, WriteConcern.ACKNOWLEDGED, false)
                 .execute(getBinding());
     }
 
@@ -333,11 +356,11 @@ public List<T> aggregateDb(final List<Bson> pipeline) {
     }
 
     private <D> List<D> aggregate(final List<Bson> pipeline, final Decoder<D> decoder, final AggregationLevel level) {
-        List<BsonDocument> bsonDocumentPipeline = new ArrayList<BsonDocument>();
+        List<BsonDocument> bsonDocumentPipeline = new ArrayList<>();
         for (Bson cur : pipeline) {
             bsonDocumentPipeline.add(cur.toBsonDocument(Document.class, registry));
         }
-        BatchCursor<D> cursor = new AggregateOperation<D>(namespace, bsonDocumentPipeline, decoder, level)
+        BatchCursor<D> cursor = new AggregateOperation<>(namespace, bsonDocumentPipeline, decoder, level)
                 .execute(getBinding());
         List<D> results = new ArrayList<>();
         while (cursor.hasNext()) {
@@ -372,8 +395,8 @@ public <D> List<D> find(final BsonDocument filter, final BsonDocument sort, fina
     }
 
     public <D> List<D> find(final BsonDocument filter, final BsonDocument sort, final BsonDocument projection, final Decoder<D> decoder) {
-        BatchCursor<D> cursor = new FindOperation<>(namespace, decoder).filter(filter).sort(sort).projection(projection)
-                                                                        .execute(getBinding());
+        BatchCursor<D> cursor = new FindOperation<>(namespace, decoder).filter(filter).sort(sort)
+                .projection(projection).execute(getBinding());
         List<D> results = new ArrayList<>();
         while (cursor.hasNext()) {
             results.addAll(cursor.next());
@@ -394,7 +417,8 @@ public long count(final AsyncReadWriteBinding binding) throws Throwable {
     }
 
     public long count(final Bson filter) {
-        return new CountDocumentsOperation(namespace).filter(toBsonDocument(filter)).execute(getBinding());
+        return new CountDocumentsOperation(namespace)
+                .filter(toBsonDocument(filter)).execute(getBinding());
     }
 
     public BsonDocument wrap(final Document document) {
@@ -406,31 +430,35 @@ public BsonDocument toBsonDocument(final Bson document) {
     }
 
     public void createIndex(final BsonDocument key) {
-        new CreateIndexesOperation(namespace, asList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED).execute(getBinding());
+        new CreateIndexesOperation(namespace, singletonList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED)
+                .execute(getBinding());
     }
 
     public void createIndex(final Document key) {
-        new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED).execute(getBinding());
+        new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED)
+                .execute(getBinding());
     }
 
     public void createUniqueIndex(final Document key) {
-        new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key)).unique(true)), WriteConcern.ACKNOWLEDGED)
+        new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key)).unique(true)),
+                                   WriteConcern.ACKNOWLEDGED)
                 .execute(getBinding());
     }
 
     public void createIndex(final Document key, final String defaultLanguage) {
-        new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)),
-                                          WriteConcern.ACKNOWLEDGED).execute(getBinding());
+        new CreateIndexesOperation(namespace,
+                                   singletonList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)), WriteConcern.ACKNOWLEDGED).execute(getBinding());
     }
 
     public void createIndex(final Bson key) {
-        new CreateIndexesOperation(namespace, asList(new IndexRequest(key.toBsonDocument(Document.class, registry))),
-                                          WriteConcern.ACKNOWLEDGED).execute(getBinding());
+        new CreateIndexesOperation(namespace,
+                                   singletonList(new IndexRequest(key.toBsonDocument(Document.class, registry))), WriteConcern.ACKNOWLEDGED).execute(getBinding());
     }
 
     public List<BsonDocument> listIndexes(){
         List<BsonDocument> indexes = new ArrayList<>();
-        BatchCursor<BsonDocument> cursor = new ListIndexesOperation<>(namespace, new BsonDocumentCodec()).execute(getBinding());
+        BatchCursor<BsonDocument> cursor = new ListIndexesOperation<>(namespace, new BsonDocumentCodec())
+                .execute(getBinding());
         while (cursor.hasNext()) {
             indexes.addAll(cursor.next());
         }
@@ -439,8 +467,8 @@ public List<BsonDocument> listIndexes(){
 
     public static void killAllSessions() {
         try {
-            new CommandReadOperation<>("admin", new BsonDocument("killAllSessions", new BsonArray()),
-                    new BsonDocumentCodec()).execute(getBinding());
+            new CommandReadOperation<>("admin",
+                                       new BsonDocument("killAllSessions", new BsonArray()), new BsonDocumentCodec()).execute(getBinding());
         } catch (MongoCommandException e) {
             // ignore exception caused by killing the implicit session that the killAllSessions command itself is running in
         }
@@ -449,9 +477,8 @@ public static void killAllSessions() {
     public void renameCollection(final MongoNamespace newNamespace) {
         try {
             new CommandReadOperation<>("admin",
-                    new BsonDocument("renameCollection", new BsonString(getNamespace().getFullName()))
-                                .append("to", new BsonString(newNamespace.getFullName())),
-                    new BsonDocumentCodec()).execute(getBinding());
+                                       new BsonDocument("renameCollection", new BsonString(getNamespace().getFullName()))
+                            .append("to", new BsonString(newNamespace.getFullName())), new BsonDocumentCodec()).execute(getBinding());
         } catch (MongoCommandException e) {
             // do nothing
         }
@@ -462,10 +489,12 @@ public void runAdminCommand(final String command) {
     }
 
     public void runAdminCommand(final BsonDocument command) {
-        new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(getBinding());
+        new CommandReadOperation<>("admin", command, new BsonDocumentCodec())
+                .execute(getBinding());
     }
 
     public void runAdminCommand(final BsonDocument command, final ReadPreference readPreference) {
-        new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(getBinding(readPreference));
+        new CommandReadOperation<>("admin", command, new BsonDocumentCodec())
+                .execute(getBinding(readPreference));
     }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy
index d75d6ef489e..b3da89231e7 100644
--- a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy
@@ -66,6 +66,6 @@ class ConnectionSpecification extends OperationFunctionalSpecification {
     }
    private static BsonDocument getHelloResult() {
         new CommandReadOperation<BsonDocument>('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)),
-                                               new BsonDocumentCodec()).execute(getBinding())
+                new BsonDocumentCodec()).execute(getBinding())
     }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy b/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy
index 74dad9221c0..012ba23e339 100644
--- a/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy
@@ -18,6 +18,7 @@ import util.spock.annotations.Slow
 import java.util.concurrent.CountDownLatch
 import java.util.concurrent.TimeUnit
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getSslSettings
 
 class NettyStreamSpecification extends Specification {
@@ -42,7 +43,7 @@ class NettyStreamSpecification extends Specification {
         def stream = factory.create(new ServerAddress())
 
         when:
-        stream.open()
+        stream.open(OPERATION_CONTEXT)
 
         then:
         !stream.isClosed()
@@ -68,7 +69,7 @@ class NettyStreamSpecification extends Specification {
         def stream = factory.create(new ServerAddress())
 
         when:
-        stream.open()
+        stream.open(OPERATION_CONTEXT)
 
         then:
         thrown(MongoSocketOpenException)
@@ -95,7 +96,7 @@ class NettyStreamSpecification extends Specification {
         def callback = new CallbackErrorHolder()
 
         when:
-        stream.openAsync(callback)
+        stream.openAsync(OPERATION_CONTEXT, callback)
 
         then:
         callback.getError().is(exception)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java
new file mode 100644
index 00000000000..17b1a1c4a7e
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.binding;
+
+import com.mongodb.ReadPreference;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.async.SingleResultCallback;
+import com.mongodb.internal.connection.AsyncConnection;
+import com.mongodb.internal.connection.OperationContext;
+
+import static org.bson.assertions.Assertions.notNull;
+
+public final class AsyncOperationContextBinding implements AsyncReadWriteBinding {
+
+    private final AsyncReadWriteBinding wrapped;
+    private final OperationContext operationContext;
+
+    public AsyncOperationContextBinding(final AsyncReadWriteBinding wrapped, final OperationContext operationContext) {
+        this.wrapped = notNull("wrapped", wrapped);
+        this.operationContext = notNull("operationContext", operationContext);
+    }
+
+    @Override
+    public ReadPreference getReadPreference() {
+        return wrapped.getReadPreference();
+    }
+
+    @Override
+    public void getWriteConnectionSource(final SingleResultCallback<AsyncConnectionSource> callback) {
+        wrapped.getWriteConnectionSource((result, t) -> {
+            if (t != null) {
+                callback.onResult(null, t);
+            } else {
+                callback.onResult(new SessionBindingAsyncConnectionSource(result), null);
+            }
+        });
+    }
+
+    @Override
+    public OperationContext getOperationContext() {
+        return operationContext;
+    }
+
+    @Override
+    public void getReadConnectionSource(final SingleResultCallback<AsyncConnectionSource> callback) {
+        wrapped.getReadConnectionSource((result, t) -> {
+            if (t != null) {
+                callback.onResult(null, t);
+            } else {
+                callback.onResult(new SessionBindingAsyncConnectionSource(result), null);
+            }
+        });
+    }
+
+
+    @Override
+    public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference,
+            final SingleResultCallback<AsyncConnectionSource> callback) {
+        wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference, (result, t) -> {
+            if (t != null) {
+                callback.onResult(null, t);
+            } else {
+                callback.onResult(new SessionBindingAsyncConnectionSource(result), null);
+            }
+        });
+    }
+
+    @Override
+    public int getCount() {
+        return wrapped.getCount();
+    }
+
+    @Override
+    public AsyncReadWriteBinding retain() {
+        wrapped.retain();
+        return this;
+    }
+
+    @Override
+    public int release() {
+        return wrapped.release();
+    }
+
+    private class SessionBindingAsyncConnectionSource implements AsyncConnectionSource {
+        private final AsyncConnectionSource wrapped;
+
+        SessionBindingAsyncConnectionSource(final AsyncConnectionSource wrapped) {
+            this.wrapped = wrapped;
+        }
+
+        @Override
+        public ServerDescription getServerDescription() {
+            return wrapped.getServerDescription();
+        }
+
+        @Override
+        public OperationContext getOperationContext() {
+            return operationContext;
+        }
+
+        @Override
+        public ReadPreference getReadPreference() {
+            return wrapped.getReadPreference();
+        }
+
+        @Override
+        public void getConnection(final SingleResultCallback<AsyncConnection> callback) {
+            wrapped.getConnection(callback);
+        }
+
+        @Override
+        public int getCount() {
+            return wrapped.getCount();
+        }
+
+        @Override
+        public AsyncConnectionSource retain() {
+            wrapped.retain();
+            return this;
+        }
+
+        @Override
+        public int release() {
+            return wrapped.release();
+        }
+    }
+
+    public AsyncReadWriteBinding getWrapped() {
+        return wrapped;
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java
index ea56301e8cb..fa588a340d0 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java
@@ -17,25 +17,21 @@
 package com.mongodb.internal.binding;
 
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 import static org.bson.assertions.Assertions.notNull;
 
 public final class AsyncSessionBinding implements AsyncReadWriteBinding {
 
     private final AsyncReadWriteBinding wrapped;
-    private final SessionContext sessionContext;
+    private final OperationContext operationContext;
 
     public AsyncSessionBinding(final AsyncReadWriteBinding wrapped) {
         this.wrapped = notNull("wrapped", wrapped);
-        this.sessionContext = new SimpleSessionContext();
+        this.operationContext = wrapped.getOperationContext().withSessionContext(new SimpleSessionContext());
     }
 
     @Override
@@ -54,25 +50,9 @@ public void getWriteConnectionSource(final SingleResultCallback<AsyncConnectionS
         });
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return sessionContext;
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return wrapped.getServerApi();
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return wrapped.getRequestContext();
-    }
-
     @Override
     public OperationContext getOperationContext() {
-        return wrapped.getOperationContext();
+        return operationContext;
     }
 
     @Override
@@ -127,25 +107,9 @@ public ServerDescription getServerDescription() {
             return wrapped.getServerDescription();
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return sessionContext;
-        }
-
-        @Override
-        @Nullable
-        public ServerApi getServerApi() {
-            return wrapped.getServerApi();
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return wrapped.getRequestContext();
-        }
-
         @Override
         public OperationContext getOperationContext() {
-            return wrapped.getOperationContext();
+            return operationContext;
         }
 
         @Override
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBindingSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBindingSpecification.groovy
index d0e771ed092..87fa1b9c4ff 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBindingSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBindingSpecification.groovy
@@ -19,11 +19,14 @@ package com.mongodb.internal.binding
 import com.mongodb.internal.async.SingleResultCallback
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
 class AsyncSessionBindingSpecification extends Specification {
 
     def 'should wrap the passed in async binding'() {
         given:
         def wrapped = Mock(AsyncReadWriteBinding)
+        wrapped.getOperationContext() >> OPERATION_CONTEXT
         def binding = new AsyncSessionBinding(wrapped)
 
         when:
@@ -63,10 +66,10 @@ class AsyncSessionBindingSpecification extends Specification {
         1 * wrapped.getWriteConnectionSource(_)
 
         when:
-        def context = binding.getSessionContext()
+        def context = binding.getOperationContext().getSessionContext()
 
         then:
-        0 * wrapped.getSessionContext()
+        0 * wrapped.getOperationContext().getSessionContext()
         context instanceof SimpleSessionContext
     }
 
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java
index ca783beb2df..3fff8b66e06 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java
@@ -19,20 +19,14 @@
 import com.mongodb.MongoInternalException;
 import com.mongodb.MongoTimeoutException;
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ServerDescription;
-import com.mongodb.internal.IgnorableRequestContext;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.Cluster;
-import com.mongodb.internal.connection.NoOpSessionContext;
 import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.Server;
 import com.mongodb.internal.selector.ReadPreferenceServerSelector;
 import com.mongodb.internal.selector.WritableServerSelector;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -54,35 +48,18 @@ public class AsyncSingleConnectionBinding extends AbstractReferenceCounted imple
     private volatile Server writeServer;
     private volatile ServerDescription readServerDescription;
     private volatile ServerDescription writeServerDescription;
-    @Nullable
-    private final ServerApi serverApi;
-    private final OperationContext operationContext = new OperationContext();
+    private final OperationContext operationContext;
 
     /**
      * Create a new binding with the given cluster.
-     *  @param cluster     a non-null Cluster which will be used to select a server to bind to
-     * @param maxWaitTime the maximum time to wait for a connection to become available.
-     * @param timeUnit    a non-null TimeUnit for the maxWaitTime
-     * @param serverApi   the server api, which may be null
-     */
-    public AsyncSingleConnectionBinding(final Cluster cluster, final long maxWaitTime, final TimeUnit timeUnit,
-                                        @Nullable final ServerApi serverApi) {
-        this(cluster, primary(), maxWaitTime, timeUnit, serverApi);
-    }
-
-    /**
-     * Create a new binding with the given cluster.
-     *  @param cluster        a non-null Cluster which will be used to select a server to bind to
+     *
+     * @param cluster        a non-null Cluster which will be used to select a server to bind to
      * @param readPreference the readPreference for reads, if not primary a separate connection will be used for reads
-     * @param maxWaitTime    the maximum time to wait for a connection to become available.
-     * @param timeUnit       a non-null TimeUnit for the maxWaitTime
-     * @param serverApi      the server api, which may be null
+     * @param operationContext the operation context
      */
-    public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference,
-                                        final long maxWaitTime, final TimeUnit timeUnit, @Nullable final ServerApi serverApi) {
-        this.serverApi = serverApi;
-
+    public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, final OperationContext operationContext) {
         notNull("cluster", cluster);
+        this.operationContext = operationContext;
         this.readPreference = notNull("readPreference", readPreference);
         CountDownLatch latch = new CountDownLatch(2);
         cluster.selectServerAsync(new WritableServerSelector(), operationContext, (result, t) -> {
@@ -100,7 +77,7 @@ public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference
             }
         });
 
-        awaitLatch(maxWaitTime, timeUnit, latch);
+        awaitLatch(latch);
 
         if (writeServer == null || readServer == null) {
             throw new MongoInternalException("Failure to select server");
@@ -112,7 +89,7 @@ public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference
             writeServerLatch.countDown();
         });
 
-        awaitLatch(maxWaitTime, timeUnit, writeServerLatch);
+        awaitLatch(writeServerLatch);
 
         if (writeConnection == null) {
             throw new MongoInternalException("Failure to get connection");
@@ -124,16 +101,16 @@ public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference
             readConnection = result;
             readServerLatch.countDown();
         });
-        awaitLatch(maxWaitTime, timeUnit, readServerLatch);
+        awaitLatch(readServerLatch);
 
         if (readConnection == null) {
             throw new MongoInternalException("Failure to get connection");
         }
     }
 
-    private void awaitLatch(final long maxWaitTime, final TimeUnit timeUnit, final CountDownLatch latch) {
+    private void awaitLatch(final CountDownLatch latch) {
         try {
-            if (!latch.await(maxWaitTime, timeUnit)) {
+            if (!latch.await(operationContext.getTimeoutContext().timeoutOrAlternative(10000), TimeUnit.MILLISECONDS)) {
                 throw new MongoTimeoutException("Failed to get servers");
             }
         } catch (InterruptedException e) {
@@ -152,22 +129,6 @@ public ReadPreference getReadPreference() {
         return readPreference;
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return NoOpSessionContext.INSTANCE;
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return serverApi;
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return IgnorableRequestContext.INSTANCE;
-    }
-
     @Override
     public OperationContext getOperationContext() {
         return operationContext;
@@ -221,22 +182,6 @@ public ServerDescription getServerDescription() {
             return serverDescription;
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return NoOpSessionContext.INSTANCE;
-        }
-
-        @Override
-        @Nullable
-        public ServerApi getServerApi() {
-            return serverApi;
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return IgnorableRequestContext.INSTANCE;
-        }
-
         @Override
         public OperationContext getOperationContext() {
             return operationContext;
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java
new file mode 100644
index 00000000000..6af3f4520d4
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.binding;
+
+import com.mongodb.ReadPreference;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
+
+import static org.bson.assertions.Assertions.notNull;
+
+public class OperationContextBinding implements ReadWriteBinding {
+    private final ReadWriteBinding wrapped;
+    private final OperationContext operationContext;
+
+    public OperationContextBinding(final ReadWriteBinding wrapped, final OperationContext operationContext) {
+        this.wrapped = notNull("wrapped", wrapped);
+        this.operationContext = notNull("operationContext", operationContext);
+    }
+
+    @Override
+    public ReadPreference getReadPreference() {
+        return wrapped.getReadPreference();
+    }
+
+    @Override
+    public int getCount() {
+        return wrapped.getCount();
+    }
+
+    @Override
+    public ReadWriteBinding retain() {
+        wrapped.retain();
+        return this;
+    }
+
+    @Override
+    public int release() {
+        return wrapped.release();
+    }
+
+    @Override
+    public ConnectionSource getReadConnectionSource() {
+        return new SessionBindingConnectionSource(wrapped.getReadConnectionSource());
+    }
+
+    @Override
+    public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) {
+        return new SessionBindingConnectionSource(wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference));
+    }
+
+    @Override
+    public OperationContext getOperationContext() {
+        return operationContext;
+    }
+
+    @Override
+    public ConnectionSource getWriteConnectionSource() {
+        return new SessionBindingConnectionSource(wrapped.getWriteConnectionSource());
+    }
+
+    private class SessionBindingConnectionSource implements ConnectionSource {
+        private ConnectionSource wrapped;
+
+        SessionBindingConnectionSource(final ConnectionSource wrapped) {
+            this.wrapped = wrapped;
+        }
+
+        @Override
+        public ServerDescription getServerDescription() {
+            return wrapped.getServerDescription();
+        }
+
+        @Override
+        public OperationContext getOperationContext() {
+            return operationContext;
+        }
+
+        @Override
+        public ReadPreference getReadPreference() {
+            return wrapped.getReadPreference();
+        }
+
+        @Override
+        public Connection getConnection() {
+            return wrapped.getConnection();
+        }
+
+        @Override
+        public ConnectionSource retain() {
+            wrapped = wrapped.retain();
+            return this;
+        }
+
+        @Override
+        public int getCount() {
+            return wrapped.getCount();
+        }
+
+        @Override
+        public int release() {
+            return wrapped.release();
+        }
+    }
+
+    public ReadWriteBinding getWrapped() {
+        return wrapped;
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java
index 4005a56af2b..3a2666a8093 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java
@@ -17,23 +17,19 @@
 package com.mongodb.internal.binding;
 
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
-import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.connection.Connection;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
+import com.mongodb.internal.connection.OperationContext;
 
 import static org.bson.assertions.Assertions.notNull;
 
 public class SessionBinding implements ReadWriteBinding {
     private final ReadWriteBinding wrapped;
-    private final SessionContext sessionContext;
+    private final OperationContext operationContext;
 
     public SessionBinding(final ReadWriteBinding wrapped) {
         this.wrapped = notNull("wrapped", wrapped);
-        this.sessionContext = new SimpleSessionContext();
+        this.operationContext = wrapped.getOperationContext().withSessionContext(new SimpleSessionContext());
     }
 
     @Override
@@ -67,25 +63,9 @@ public ConnectionSource getReadConnectionSource(final int minWireVersion, final
         return new SessionBindingConnectionSource(wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference));
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return sessionContext;
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return wrapped.getServerApi();
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return wrapped.getRequestContext();
-    }
-
     @Override
     public OperationContext getOperationContext() {
-        return wrapped.getOperationContext();
+        return operationContext;
     }
 
     @Override
@@ -105,24 +85,9 @@ public ServerDescription getServerDescription() {
             return wrapped.getServerDescription();
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return sessionContext;
-        }
-
         @Override
         public OperationContext getOperationContext() {
-            return wrapped.getOperationContext();
-        }
-
-        @Override
-        public ServerApi getServerApi() {
-            return wrapped.getServerApi();
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return wrapped.getRequestContext();
+            return operationContext;
         }
 
         @Override
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java
index bff96ee9941..ee258fb28cf 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java
@@ -28,13 +28,13 @@
 
 import java.util.UUID;
 
-class SimpleSessionContext implements SessionContext {
+public class SimpleSessionContext implements SessionContext {
     private final BsonDocument sessionId;
     private BsonTimestamp operationTime;
     private long counter;
     private BsonDocument clusterTime;
 
-    SimpleSessionContext() {
+    public SimpleSessionContext() {
         this.sessionId = createNewServerSessionIdentifier();
     }
 
diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java
index e371003fc75..6bf3cff636d 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java
@@ -17,19 +17,13 @@
 package com.mongodb.internal.binding;
 
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
-import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.connection.ServerDescription;
-import com.mongodb.internal.IgnorableRequestContext;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.Connection;
-import com.mongodb.internal.connection.NoOpSessionContext;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.ServerTuple;
 import com.mongodb.internal.selector.ReadPreferenceServerSelector;
 import com.mongodb.internal.selector.WritableServerSelector;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 import static com.mongodb.ReadPreference.primary;
 import static com.mongodb.assertions.Assertions.isTrue;
@@ -47,8 +41,6 @@ public class SingleConnectionBinding implements ReadWriteBinding {
     private final ServerDescription readServerDescription;
     private final ServerDescription writeServerDescription;
     private int count = 1;
-    @Nullable
-    private final ServerApi serverApi;
     private final OperationContext operationContext;
 
     /**
@@ -56,12 +48,12 @@ public class SingleConnectionBinding implements ReadWriteBinding {
      *
      * @param cluster     a non-null Cluster which will be used to select a server to bind to
      * @param readPreference the readPreference for reads, if not primary a separate connection will be used for reads
+     * @param operationContext the operation context to use for server selection and connection checkout
      */
-    public SingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, @Nullable final ServerApi serverApi) {
-        this.serverApi = serverApi;
-        operationContext = new OperationContext();
+    public SingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, final OperationContext operationContext) {
         notNull("cluster", cluster);
         this.readPreference = notNull("readPreference", readPreference);
+        this.operationContext = operationContext;
         ServerTuple writeServerTuple = cluster.selectServer(new WritableServerSelector(), operationContext);
         writeServerDescription = writeServerTuple.getServerDescription();
         writeConnection = writeServerTuple.getServer().getConnection(operationContext);
@@ -112,22 +104,6 @@ public ConnectionSource getReadConnectionSource(final int minWireVersion, final
         throw new UnsupportedOperationException();
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return NoOpSessionContext.INSTANCE;
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return serverApi;
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return IgnorableRequestContext.INSTANCE;
-    }
-
     @Override
     public OperationContext getOperationContext() {
         return operationContext;
@@ -155,26 +131,11 @@ public ServerDescription getServerDescription() {
             return serverDescription;
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return NoOpSessionContext.INSTANCE;
-        }
-
         @Override
         public OperationContext getOperationContext() {
             return operationContext;
         }
 
-        @Override
-        public ServerApi getServerApi() {
-            return serverApi;
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return IgnorableRequestContext.INSTANCE;
-        }
-
         @Override
         public ReadPreference getReadPreference() {
             return readPreference;
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy
index b857c2574bd..0ac6b8fd9df 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy
@@ -13,6 +13,7 @@ import util.spock.annotations.Slow
 
 import java.util.concurrent.CountDownLatch
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getSslSettings
 import static java.util.concurrent.TimeUnit.MILLISECONDS
 
@@ -39,7 +40,7 @@ class AsyncSocketChannelStreamSpecification extends Specification {
         def stream = factory.create(new ServerAddress('host1'))
 
         when:
-        stream.open()
+        stream.open(OPERATION_CONTEXT)
 
         then:
         !stream.isClosed()
@@ -65,7 +66,7 @@ class AsyncSocketChannelStreamSpecification extends Specification {
         def stream = factory.create(new ServerAddress())
 
         when:
-        stream.open()
+        stream.open(OPERATION_CONTEXT)
 
         then:
         thrown(MongoSocketOpenException)
@@ -89,7 +90,7 @@ class AsyncSocketChannelStreamSpecification extends Specification {
         def callback = new CallbackErrorHolder()
 
         when:
-        stream.openAsync(callback)
+        stream.openAsync(OPERATION_CONTEXT, callback)
 
         then:
         callback.getError().is(exception)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy
index 858b5ce6c84..6efe88806e8 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy
@@ -18,7 +18,6 @@ package com.mongodb.internal.connection
 
 import com.mongodb.LoggerSettings
 import com.mongodb.MongoSocketOpenException
-import com.mongodb.MongoSocketReadTimeoutException
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ServerAddress
 import com.mongodb.connection.ClusterConnectionMode
@@ -26,26 +25,20 @@ import com.mongodb.connection.ClusterId
 import com.mongodb.connection.ServerId
 import com.mongodb.connection.SocketSettings
 import com.mongodb.internal.connection.netty.NettyStreamFactory
-import org.bson.BsonDocument
-import org.bson.BsonInt32
-import org.bson.BsonString
 import spock.lang.IgnoreIf
 import util.spock.annotations.Slow
 
 import java.util.concurrent.TimeUnit
 
-import static com.mongodb.ClusterFixture.getClusterConnectionMode
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getCredentialWithCache
-import static com.mongodb.ClusterFixture.getPrimary
 import static com.mongodb.ClusterFixture.getServerApi
 import static com.mongodb.ClusterFixture.getSslSettings
-import static com.mongodb.internal.connection.CommandHelper.executeCommand
 
 @Slow
 class AsyncStreamTimeoutsSpecification extends OperationFunctionalSpecification {
 
     static SocketSettings openSocketSettings = SocketSettings.builder().connectTimeout(1, TimeUnit.MILLISECONDS).build()
-    static SocketSettings readSocketSettings = SocketSettings.builder().readTimeout(5, TimeUnit.SECONDS).build()
 
     @IgnoreIf({ getSslSettings().isEnabled() })
     def 'should throw a MongoSocketOpenException when the AsynchronousSocket Stream fails to open'() {
@@ -56,35 +49,12 @@ class AsyncStreamTimeoutsSpecification extends OperationFunctionalSpecification
                 .create(new ServerId(new ClusterId(), new ServerAddress(new InetSocketAddress('192.168.255.255', 27017))))
 
         when:
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
 
         then:
         thrown(MongoSocketOpenException)
     }
 
-    @IgnoreIf({ getSslSettings().isEnabled() })
-    def 'should throw a MongoSocketReadTimeoutException with the AsynchronousSocket stream'() {
-        given:
-        def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE,
-                new AsynchronousSocketChannelStreamFactory(new DefaultInetAddressResolver(), readSocketSettings, getSslSettings()),
-                getCredentialWithCache(), null, null,
-                [], LoggerSettings.builder().build(), null, getServerApi()).create(new ServerId(new ClusterId(), getPrimary()))
-        connection.open()
-
-        getCollectionHelper().insertDocuments(new BsonDocument('_id', new BsonInt32(1)))
-        def countCommand = new BsonDocument('count', new BsonString(getCollectionName()))
-        countCommand.put('query', new BsonDocument('$where', new BsonString('sleep(5050); return true;')))
-
-        when:
-        executeCommand(getDatabaseName(), countCommand,  getClusterConnectionMode(), getServerApi(), connection)
-
-        then:
-        thrown(MongoSocketReadTimeoutException)
-
-        cleanup:
-        connection?.close()
-    }
-
     def 'should throw a MongoSocketOpenException when the Netty Stream fails to open'() {
         given:
         def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE,
@@ -93,32 +63,10 @@ class AsyncStreamTimeoutsSpecification extends OperationFunctionalSpecification
                 new ServerAddress(new InetSocketAddress('192.168.255.255', 27017))))
 
         when:
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
 
         then:
         thrown(MongoSocketOpenException)
     }
 
-
-    def 'should throw a MongoSocketReadTimeoutException with the Netty stream'() {
-        given:
-        def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE,
-                new NettyStreamFactory(readSocketSettings, getSslSettings()), getCredentialWithCache(), null, null,
-                [], LoggerSettings.builder().build(), null, getServerApi()).create(new ServerId(new ClusterId(), getPrimary()))
-        connection.open()
-
-        getCollectionHelper().insertDocuments(new BsonDocument('_id', new BsonInt32(1)))
-        def countCommand = new BsonDocument('count', new BsonString(getCollectionName()))
-        countCommand.put('query', new BsonDocument('$where', new BsonString('sleep(5050); return true;')))
-
-        when:
-        executeCommand(getDatabaseName(), countCommand, getClusterConnectionMode(), getServerApi(), connection)
-
-        then:
-        thrown(MongoSocketReadTimeoutException)
-
-        cleanup:
-        connection?.close()
-    }
-
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy
index 21979eb87ce..8dd53bc1c03 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy
@@ -19,6 +19,7 @@ import spock.lang.Specification
 import java.util.function.Supplier
 
 import static com.mongodb.AuthenticationMechanism.MONGODB_AWS
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getClusterConnectionMode
 import static com.mongodb.ClusterFixture.getConnectionString
 import static com.mongodb.ClusterFixture.getCredential
@@ -51,7 +52,7 @@ class AwsAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         thrown(MongoCommandException)
@@ -70,7 +71,7 @@ class AwsAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         true
@@ -100,7 +101,7 @@ class AwsAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         true
@@ -159,10 +160,10 @@ class AwsAuthenticationSpecification extends Specification {
     private static void openConnection(final InternalConnection connection, final boolean async) {
         if (async) {
             FutureResultCallback<Void> futureResultCallback = new FutureResultCallback<Void>()
-            connection.openAsync(futureResultCallback)
+            connection.openAsync(OPERATION_CONTEXT, futureResultCallback)
             futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS)
         } else {
-            connection.open()
+            connection.open(OPERATION_CONTEXT)
         }
     }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy
index 6f005eb9733..085a5100198 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy
@@ -30,6 +30,7 @@ import spock.lang.Specification
 import java.util.concurrent.CountDownLatch
 
 import static com.mongodb.ClusterFixture.LEGACY_HELLO
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getClusterConnectionMode
 import static com.mongodb.ClusterFixture.getCredentialWithCache
 import static com.mongodb.ClusterFixture.getPrimary
@@ -45,7 +46,7 @@ class CommandHelperSpecification extends Specification {
                 new NettyStreamFactory(SocketSettings.builder().build(), getSslSettings()),
                 getCredentialWithCache(), null, null, [], LoggerSettings.builder().build(), null, getServerApi())
                 .create(new ServerId(new ClusterId(), getPrimary()))
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
     }
 
     def cleanup() {
@@ -58,7 +59,7 @@ class CommandHelperSpecification extends Specification {
         Throwable receivedException = null
         def latch1 = new CountDownLatch(1)
         executeCommandAsync('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), getClusterConnectionMode(),
-                getServerApi(), connection)
+                getServerApi(), connection, OPERATION_CONTEXT)
                 { document, exception -> receivedDocument = document; receivedException = exception; latch1.countDown() }
         latch1.await()
 
@@ -70,7 +71,7 @@ class CommandHelperSpecification extends Specification {
         when:
         def latch2 = new CountDownLatch(1)
         executeCommandAsync('admin', new BsonDocument('non-existent-command', new BsonInt32(1)), getClusterConnectionMode(),
-                getServerApi(), connection)
+                getServerApi(), connection, OPERATION_CONTEXT)
                 { document, exception -> receivedDocument = document; receivedException = exception; latch2.countDown() }
         latch2.await()
 
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java
index 919e0b130a8..56122ec64af 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java
@@ -26,11 +26,13 @@
 import com.mongodb.connection.ConnectionPoolSettings;
 import com.mongodb.connection.ServerId;
 import com.mongodb.event.ConnectionCreatedEvent;
-import com.mongodb.internal.time.Timeout;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.inject.EmptyProvider;
 import com.mongodb.internal.inject.OptionalProvider;
 import com.mongodb.internal.inject.SameObjectProvider;
+import com.mongodb.internal.time.TimePointTest;
+import com.mongodb.internal.time.Timeout;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Tag;
@@ -58,6 +60,11 @@
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Stream;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static com.mongodb.ClusterFixture.createOperationContext;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED;
 import static java.lang.Long.MAX_VALUE;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.MINUTES;
@@ -110,14 +117,14 @@ public void shouldThrowOnTimeout() throws InterruptedException {
         provider = new DefaultConnectionPool(SERVER_ID, connectionFactory,
                 ConnectionPoolSettings.builder()
                         .maxSize(1)
-                        .maxWaitTime(50, MILLISECONDS)
                         .build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
-        provider.get(new OperationContext());
+        TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(50);
+        provider.get(createOperationContext(timeoutSettings));
 
         // when
-        TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(provider);
+        TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(provider, timeoutSettings);
         new Thread(connectionGetter).start();
 
         connectionGetter.getLatch().await();
@@ -131,17 +138,16 @@ public void shouldThrowOnPoolClosed() {
         provider = new DefaultConnectionPool(SERVER_ID, connectionFactory,
                 ConnectionPoolSettings.builder()
                         .maxSize(1)
-                        .maxWaitTime(50, MILLISECONDS)
                         .build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.close();
 
         String expectedExceptionMessage = "The server at 127.0.0.1:27017 is no longer available";
         MongoServerUnavailableException exception;
-        exception = assertThrows(MongoServerUnavailableException.class, () -> provider.get(new OperationContext()));
+        exception = assertThrows(MongoServerUnavailableException.class, () -> provider.get(OPERATION_CONTEXT));
         assertEquals(expectedExceptionMessage, exception.getMessage());
         SupplyingCallback<InternalConnection> supplyingCallback = new SupplyingCallback<>();
-        provider.getAsync(new OperationContext(), supplyingCallback);
+        provider.getAsync(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50)), supplyingCallback);
         exception = assertThrows(MongoServerUnavailableException.class, supplyingCallback::get);
         assertEquals(expectedExceptionMessage, exception.getMessage());
     }
@@ -155,14 +161,14 @@ public void shouldExpireConnectionAfterMaxLifeTime() throws InterruptedException
                         .maintenanceInitialDelay(5, MINUTES)
                         .maxConnectionLifeTime(50, MILLISECONDS)
                         .build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
 
         // when
-        provider.get(new OperationContext()).close();
+        provider.get(OPERATION_CONTEXT).close();
         Thread.sleep(100);
         provider.doMaintenance();
-        provider.get(new OperationContext());
+        provider.get(OPERATION_CONTEXT);
 
         // then
         assertTrue(connectionFactory.getNumCreatedConnections() >= 2);  // should really be two, but it's racy
@@ -176,11 +182,11 @@ public void shouldExpireConnectionAfterLifeTimeOnClose() throws InterruptedExcep
                 ConnectionPoolSettings.builder()
                         .maxSize(1)
                         .maxConnectionLifeTime(20, MILLISECONDS).build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
 
         // when
-        InternalConnection connection = provider.get(new OperationContext());
+        InternalConnection connection = provider.get(OPERATION_CONTEXT);
         Thread.sleep(50);
         connection.close();
 
@@ -197,14 +203,14 @@ public void shouldExpireConnectionAfterMaxIdleTime() throws InterruptedException
                         .maxSize(1)
                         .maintenanceInitialDelay(5, MINUTES)
                         .maxConnectionIdleTime(50, MILLISECONDS).build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
 
         // when
-        provider.get(new OperationContext()).close();
+        provider.get(OPERATION_CONTEXT).close();
         Thread.sleep(100);
         provider.doMaintenance();
-        provider.get(new OperationContext());
+        provider.get(OPERATION_CONTEXT);
 
         // then
         assertTrue(connectionFactory.getNumCreatedConnections() >= 2);  // should really be two, but it's racy
@@ -219,14 +225,14 @@ public void shouldCloseConnectionAfterExpiration() throws InterruptedException {
                         .maxSize(1)
                         .maintenanceInitialDelay(5, MINUTES)
                         .maxConnectionLifeTime(20, MILLISECONDS).build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
 
         // when
-        provider.get(new OperationContext()).close();
+        provider.get(OPERATION_CONTEXT).close();
         Thread.sleep(50);
         provider.doMaintenance();
-        provider.get(new OperationContext());
+        provider.get(OPERATION_CONTEXT);
 
         // then
         assertTrue(connectionFactory.getCreatedConnections().get(0).isClosed());
@@ -241,14 +247,14 @@ public void shouldCreateNewConnectionAfterExpiration() throws InterruptedExcepti
                         .maxSize(1)
                         .maintenanceInitialDelay(5, MINUTES)
                         .maxConnectionLifeTime(20, MILLISECONDS).build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
 
         // when
-        provider.get(new OperationContext()).close();
+        provider.get(OPERATION_CONTEXT).close();
         Thread.sleep(50);
         provider.doMaintenance();
-        InternalConnection secondConnection = provider.get(new OperationContext());
+        InternalConnection secondConnection = provider.get(OPERATION_CONTEXT);
 
         // then
         assertNotNull(secondConnection);
@@ -265,9 +271,9 @@ public void shouldPruneAfterMaintenanceTaskRuns() throws InterruptedException {
                         .maxConnectionLifeTime(1, MILLISECONDS)
                         .maintenanceInitialDelay(5, MINUTES)
                         .build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
-        provider.get(new OperationContext()).close();
+        provider.get(OPERATION_CONTEXT).close();
 
 
         // when
@@ -282,12 +288,12 @@ public void shouldPruneAfterMaintenanceTaskRuns() throws InterruptedException {
     void infiniteMaxSize() {
         int defaultMaxSize = ConnectionPoolSettings.builder().build().getMaxSize();
         provider = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                ConnectionPoolSettings.builder().maxSize(0).build(), EmptyProvider.instance());
+                ConnectionPoolSettings.builder().maxSize(0).build(), EmptyProvider.instance(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
         List<InternalConnection> connections = new ArrayList<>();
         try {
             for (int i = 0; i < 2 * defaultMaxSize; i++) {
-                connections.add(provider.get(new OperationContext()));
+                connections.add(provider.get(OPERATION_CONTEXT));
             }
         } finally {
             connections.forEach(connection -> {
@@ -313,18 +319,17 @@ public void concurrentUsage(final int minSize, final int maxSize, final boolean
                 ConnectionPoolSettings.builder()
                     .minSize(minSize)
                     .maxSize(maxSize)
-                    .maxWaitTime(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS)
                     .maintenanceInitialDelay(0, NANOSECONDS)
                     .maintenanceFrequency(100, MILLISECONDS)
                     .maxConnectionLifeTime(limitConnectionLifeIdleTime ? 350 : 0, MILLISECONDS)
                     .maxConnectionIdleTime(limitConnectionLifeIdleTime ? 50 : 0, MILLISECONDS)
                     .build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
         assertUseConcurrently(provider, concurrentUsersCount,
                 checkoutSync, checkoutAsync,
                 invalidateAndReadyProb, invalidateProb, readyProb,
-                cachedExecutor, SECONDS.toNanos(10));
+                cachedExecutor, SECONDS.toNanos(10), TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS));
     }
 
     private static Stream<Arguments> concurrentUsageArguments() {
@@ -352,17 +357,17 @@ public void callbackShouldNotBlockCheckoutIfOpenAsyncWorksNotInCurrentThread() t
                 ConnectionPoolSettings.builder()
                     .maxSize(DEFAULT_MAX_CONNECTING + maxAvailableConnections)
                     .addConnectionPoolListener(listener)
-                    .maxWaitTime(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS)
                     .maintenanceInitialDelay(MAX_VALUE, NANOSECONDS)
                     .build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
+        TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS);
         acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_CALLBACK,
-                controllableConnFactory, listener);
+                controllableConnFactory, listener, timeoutSettings);
         assertUseConcurrently(provider, 2 * maxAvailableConnections,
                 true, true,
                 0.02f, 0, 0,
-                cachedExecutor, SECONDS.toNanos(10));
+                cachedExecutor, SECONDS.toNanos(10), timeoutSettings);
     }
 
     /**
@@ -391,16 +396,17 @@ public void checkoutHandOverMechanism() throws InterruptedException, TimeoutExce
                              * the max pool size, and then check that no connections were created nonetheless. */
                             + maxConcurrentlyHandedOver)
                     .addConnectionPoolListener(listener)
-                    .maxWaitTime(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS)
                     .maintenanceInitialDelay(MAX_VALUE, NANOSECONDS)
                     .build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
         List<InternalConnection> connections = new ArrayList<>();
         for (int i = 0; i < openConnectionsCount; i++) {
-            connections.add(provider.get(new OperationContext(), 0, NANOSECONDS));
+            connections.add(provider.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(0))));
         }
-        acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_OPEN, controllableConnFactory, listener);
+        TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS);
+        acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_OPEN, controllableConnFactory, listener,
+                timeoutSettings);
         int previousIdx = 0;
         // concurrently check in / check out and assert the hand-over mechanism works
         for (int idx = 0; idx < connections.size(); idx += maxConcurrentlyHandedOver) {
@@ -416,7 +422,8 @@ public void checkoutHandOverMechanism() throws InterruptedException, TimeoutExce
                     return connectionId;
                 }));
                 Runnable checkOut = () -> receivedFutures.add(cachedExecutor.submit(() -> {
-                    InternalConnection connection = provider.get(new OperationContext(), TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS);
+                    InternalConnection connection =
+                            provider.get(createOperationContext(timeoutSettings));
                     return connection.getDescription().getConnectionId();
                 }));
                 if (ThreadLocalRandom.current().nextBoolean()) {
@@ -449,7 +456,7 @@ public void readyAfterCloseMustNotThrow() {
                 SERVER_ID,
                 connectionFactory,
                 ConnectionPoolSettings.builder().maxSize(1).build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.close();
         provider.ready();
     }
@@ -460,7 +467,7 @@ public void invalidateAfterCloseMustNotThrow() {
                 SERVER_ID,
                 connectionFactory,
                 ConnectionPoolSettings.builder().maxSize(1).build(),
-                mockSdamProvider());
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
         provider.ready();
         provider.close();
         provider.invalidate(null);
@@ -474,7 +481,7 @@ public void readyInvalidateConcurrentWithCloseMustNotThrow() throws ExecutionExc
                     SERVER_ID,
                     connectionFactory,
                     ConnectionPoolSettings.builder().maxSize(1).build(),
-                    mockSdamProvider());
+                    mockSdamProvider(), OPERATION_CONTEXT_FACTORY);
             try {
                 readyAndInvalidateResult = cachedExecutor.submit(() -> {
                     provider.ready();
@@ -490,14 +497,15 @@ public void readyInvalidateConcurrentWithCloseMustNotThrow() throws ExecutionExc
     }
 
     private static void assertUseConcurrently(final DefaultConnectionPool pool, final int concurrentUsersCount,
-                                              final boolean sync, final boolean async,
-                                              final float invalidateAndReadyProb, final float invalidateProb, final float readyProb,
-                                              final ExecutorService executor, final long durationNanos) throws InterruptedException {
+            final boolean sync, final boolean async,
+            final float invalidateAndReadyProb, final float invalidateProb, final float readyProb,
+            final ExecutorService executor, final long durationNanos,
+            final TimeoutSettings timeoutSettings) throws InterruptedException {
         try {
             useConcurrently(pool, concurrentUsersCount,
                     sync, async,
                     invalidateAndReadyProb, invalidateProb, readyProb,
-                    executor, durationNanos);
+                    executor, durationNanos, timeoutSettings);
         } catch (TimeoutException | ExecutionException e) {
             throw new AssertionError(e);
         }
@@ -506,7 +514,8 @@ private static void assertUseConcurrently(final DefaultConnectionPool pool, fina
     private static void useConcurrently(final DefaultConnectionPool pool, final int concurrentUsersCount,
                                         final boolean checkoutSync, final boolean checkoutAsync,
                                         final float invalidateAndReadyProb, final float invalidateProb, final float readyProb,
-                                        final ExecutorService executor, final long durationNanos)
+                                        final ExecutorService executor, final long durationNanos,
+                                        final TimeoutSettings timeoutSettings)
             throws ExecutionException, InterruptedException, TimeoutException {
         assertTrue(invalidateAndReadyProb >= 0 && invalidateAndReadyProb <= 1);
         Runnable spontaneouslyInvalidateReady = () -> {
@@ -522,15 +531,18 @@ private static void useConcurrently(final DefaultConnectionPool pool, final int
             }
         };
         Collection<Future<?>> tasks = new ArrayList<>();
-        Timeout duration = Timeout.startNow(durationNanos);
+        Timeout timeout = Timeout.expiresIn(durationNanos, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
         for (int i = 0; i < concurrentUsersCount; i++) {
             if ((checkoutSync && checkoutAsync) ? i % 2 == 0 : checkoutSync) {//check out synchronously and check in
                 tasks.add(executor.submit(() -> {
-                    while (!(duration.expired() || Thread.currentThread().isInterrupted())) {
+                    while (!Thread.currentThread().isInterrupted()) {
+                        if (timeout.call(NANOSECONDS, () -> false, (ns) -> false, () -> true)) {
+                            break;
+                        }
                         spontaneouslyInvalidateReady.run();
                         InternalConnection conn = null;
                         try {
-                            conn = pool.get(new OperationContext(), TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS);
+                            conn = pool.get(createOperationContext(timeoutSettings));
                         } catch (MongoConnectionPoolClearedException e) {
                             // expected because we spontaneously invalidate `pool`
                         } finally {
@@ -542,10 +554,13 @@ private static void useConcurrently(final DefaultConnectionPool pool, final int
                 }));
             } else if (checkoutAsync) {//check out asynchronously and check in
                 tasks.add(executor.submit(() -> {
-                    while (!(duration.expired() || Thread.currentThread().isInterrupted())) {
+                    while (!Thread.currentThread().isInterrupted()) {
+                        if (TimePointTest.hasExpired(timeout)) {
+                            break;
+                        }
                         spontaneouslyInvalidateReady.run();
                         CompletableFuture<InternalConnection> futureCheckOutCheckIn = new CompletableFuture<>();
-                        pool.getAsync(new OperationContext(), (conn, t) -> {
+                        pool.getAsync(createOperationContext(timeoutSettings), (conn, t) -> {
                             if (t != null) {
                                 if (t instanceof MongoConnectionPoolClearedException) {
                                     futureCheckOutCheckIn.complete(null); // expected because we spontaneously invalidate `pool`
@@ -590,23 +605,24 @@ private static void sleepMillis(final long millis) {
      * This results in acquiring permits to open a connection and leaving them acquired.
      */
     private static void acquireOpenPermits(final DefaultConnectionPool pool, final int openPermitsCount,
-                                           final InfiniteCheckoutEmulation infiniteEmulation,
-                                           final ControllableConnectionFactory controllableConnFactory,
-                                           final TestConnectionPoolListener listener) throws TimeoutException, InterruptedException {
+            final InfiniteCheckoutEmulation infiniteEmulation,
+            final ControllableConnectionFactory controllableConnFactory,
+            final TestConnectionPoolListener listener,
+            final TimeoutSettings timeoutSettings) throws TimeoutException, InterruptedException {
         assertTrue(openPermitsCount <= DEFAULT_MAX_CONNECTING);
         int initialCreatedEventCount = listener.countEvents(ConnectionCreatedEvent.class);
         switch (infiniteEmulation) {
             case INFINITE_CALLBACK: {
                 for (int i = 0; i < openPermitsCount; i++) {
                     SingleResultCallback<InternalConnection> infiniteCallback = (result, t) -> sleepMillis(MAX_VALUE);
-                    pool.getAsync(new OperationContext(), infiniteCallback);
+                    pool.getAsync(createOperationContext(timeoutSettings), infiniteCallback);
                 }
                 break;
             }
             case INFINITE_OPEN: {
                 controllableConnFactory.openDurationHandle.set(Duration.ofMillis(MAX_VALUE), openPermitsCount);
                 for (int i = 0; i < openPermitsCount; i++) {
-                    pool.getAsync(new OperationContext(), (result, t) -> {});
+                    pool.getAsync(createOperationContext(timeoutSettings), (result, t) -> {});
                 }
                 controllableConnFactory.openDurationHandle.await(Duration.ofMillis(TEST_WAIT_TIMEOUT_MILLIS));
                 break;
@@ -637,15 +653,15 @@ private static ControllableConnectionFactory newControllableConnectionFactory(fi
             doAnswer(invocation -> {
                 doOpen.run();
                 return null;
-            }).when(connection).open();
+            }).when(connection).open(any());
             doAnswer(invocation -> {
-                SingleResultCallback<?> callback = invocation.getArgument(0, SingleResultCallback.class);
+                SingleResultCallback<?> callback = invocation.getArgument(1, SingleResultCallback.class);
                 asyncOpenExecutor.execute(() -> {
                     doOpen.run();
                     callback.onResult(null, null);
                 });
                 return null;
-            }).when(connection).openAsync(any());
+            }).when(connection).openAsync(any(), any());
             return connection;
         };
         return new ControllableConnectionFactory(connectionFactory, openDurationHandle);
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy
index 6a78ce97f7c..cc3e0401bb5 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy
@@ -36,6 +36,7 @@ import javax.security.auth.Subject
 import javax.security.auth.login.LoginContext
 
 import static com.mongodb.AuthenticationMechanism.GSSAPI
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getClusterConnectionMode
 import static com.mongodb.ClusterFixture.getConnectionString
 import static com.mongodb.ClusterFixture.getCredential
@@ -57,7 +58,7 @@ class GSSAPIAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         thrown(MongoCommandException)
@@ -76,7 +77,7 @@ class GSSAPIAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         true
@@ -98,7 +99,7 @@ class GSSAPIAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         thrown(MongoSecurityException)
@@ -130,7 +131,7 @@ class GSSAPIAuthenticationSpecification extends Specification {
         def connection = createConnection(async, getMongoCredential(subject))
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         true
@@ -174,7 +175,7 @@ class GSSAPIAuthenticationSpecification extends Specification {
         def connection = createConnection(async, getMongoCredential(saslClientProperties))
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         true
@@ -218,10 +219,10 @@ class GSSAPIAuthenticationSpecification extends Specification {
     private static void openConnection(final InternalConnection connection, final boolean async) {
         if (async) {
             FutureResultCallback<Void> futureResultCallback = new FutureResultCallback<Void>()
-            connection.openAsync(futureResultCallback)
+            connection.openAsync(OPERATION_CONTEXT, futureResultCallback)
             futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS)
         } else {
-            connection.open()
+            connection.open(OPERATION_CONTEXT)
         }
     }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy
index 9f2ca47b9ee..f18a6915e38 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy
@@ -29,6 +29,7 @@ import spock.lang.Specification
 import javax.security.auth.login.LoginContext
 
 import static com.mongodb.AuthenticationMechanism.GSSAPI
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getLoginContextName
 import static com.mongodb.ClusterFixture.getPrimary
 import static com.mongodb.ClusterFixture.getServerApi
@@ -53,7 +54,7 @@ class GSSAPIAuthenticatorSpecification extends Specification {
                 .create(new ServerId(new ClusterId(), getPrimary()))
 
         when:
-        internalConnection.open()
+        internalConnection.open(OPERATION_CONTEXT)
 
         then:
         1 * subjectProvider.getSubject() >> subject
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy
index e57627ce325..e8c2a408220 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy
@@ -32,6 +32,7 @@ import spock.lang.IgnoreIf
 import spock.lang.Specification
 
 import static com.mongodb.AuthenticationMechanism.PLAIN
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getClusterConnectionMode
 import static com.mongodb.ClusterFixture.getConnectionString
 import static com.mongodb.ClusterFixture.getCredential
@@ -51,7 +52,7 @@ class PlainAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         thrown(MongoCommandException)
@@ -70,7 +71,7 @@ class PlainAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         true
@@ -89,7 +90,7 @@ class PlainAuthenticationSpecification extends Specification {
         when:
         openConnection(connection, async)
         executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')),
-                getClusterConnectionMode(), null, connection)
+                getClusterConnectionMode(), null, connection, OPERATION_CONTEXT)
 
         then:
         thrown(MongoSecurityException)
@@ -122,10 +123,10 @@ class PlainAuthenticationSpecification extends Specification {
     private static void openConnection(final InternalConnection connection, final boolean async) {
         if (async) {
             FutureResultCallback<Void> futureResultCallback = new FutureResultCallback<Void>()
-            connection.openAsync(futureResultCallback)
+            connection.openAsync(OPERATION_CONTEXT, futureResultCallback)
             futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS)
         } else {
-            connection.open()
+            connection.open(OPERATION_CONTEXT)
         }
     }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java
index e2377c8efef..6ab01fdfc8a 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java
@@ -32,6 +32,7 @@
 
 import java.util.Collections;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getClusterConnectionMode;
 import static com.mongodb.ClusterFixture.getServerApi;
 import static com.mongodb.ClusterFixture.getSslSettings;
@@ -67,14 +68,14 @@ public void tearDown() {
     public void testSuccessfulAuthentication() {
         PlainAuthenticator authenticator = new PlainAuthenticator(getCredentialWithCache(userName, source, password.toCharArray()),
                 getClusterConnectionMode(), getServerApi());
-        authenticator.authenticate(internalConnection, connectionDescription);
+        authenticator.authenticate(internalConnection, connectionDescription, OPERATION_CONTEXT);
     }
 
     @Test(expected = MongoSecurityException.class)
     public void testUnsuccessfulAuthentication() {
         PlainAuthenticator authenticator = new PlainAuthenticator(getCredentialWithCache(userName, source, "wrong".toCharArray()),
                 getClusterConnectionMode(), getServerApi());
-        authenticator.authenticate(internalConnection, connectionDescription);
+        authenticator.authenticate(internalConnection, connectionDescription, OPERATION_CONTEXT);
     }
 
     private static MongoCredentialWithCache getCredentialWithCache(final String userName, final String source, final char[] password) {
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy
index 44205922a0a..faffded597e 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy
@@ -16,13 +16,11 @@
 
 package com.mongodb.internal.connection
 
-
 import com.mongodb.MongoCredential
 import com.mongodb.MongoSecurityException
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.async.FutureResultCallback
-import com.mongodb.internal.IgnorableRequestContext
 import com.mongodb.internal.binding.AsyncClusterBinding
 import com.mongodb.internal.binding.ClusterBinding
 import com.mongodb.internal.operation.CommandReadOperation
@@ -35,10 +33,10 @@ import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.createAsyncCluster
 import static com.mongodb.ClusterFixture.createCluster
 import static com.mongodb.ClusterFixture.getBinding
-import static com.mongodb.ClusterFixture.getServerApi
 import static com.mongodb.ClusterFixture.isAuthenticated
 import static com.mongodb.ClusterFixture.serverVersionLessThan
 import static com.mongodb.MongoCredential.createCredential
@@ -95,7 +93,7 @@ class ScramSha256AuthenticationSpecification extends Specification {
 
     def dropUser(final String userName) {
         new CommandReadOperation<>('admin', new BsonDocument('dropUser', new BsonString(userName)),
-            new BsonDocumentCodec()).execute(getBinding())
+                new BsonDocumentCodec()).execute(getBinding())
     }
 
     def 'test authentication and authorization'() {
@@ -105,8 +103,7 @@ class ScramSha256AuthenticationSpecification extends Specification {
         when:
         new CommandReadOperation<Document>('admin',
                 new BsonDocumentWrapper<Document>(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec())
-                .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(),
-                        IgnorableRequestContext.INSTANCE))
+                .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT))
 
         then:
         noExceptionThrown()
@@ -127,8 +124,7 @@ class ScramSha256AuthenticationSpecification extends Specification {
         // make this synchronous
         new CommandReadOperation<Document>('admin',
                 new BsonDocumentWrapper<Document>(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec())
-                .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(),
-                        IgnorableRequestContext.INSTANCE),
+                .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT),
                         callback)
         callback.get()
 
@@ -149,8 +145,7 @@ class ScramSha256AuthenticationSpecification extends Specification {
         when:
         new CommandReadOperation<Document>('admin',
                 new BsonDocumentWrapper<Document>(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec())
-                .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(),
-                        IgnorableRequestContext.INSTANCE))
+                .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT))
 
         then:
         thrown(MongoSecurityException)
@@ -170,8 +165,8 @@ class ScramSha256AuthenticationSpecification extends Specification {
         when:
         new CommandReadOperation<Document>('admin',
                 new BsonDocumentWrapper<Document>(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec())
-                .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(),
-                        IgnorableRequestContext.INSTANCE), callback)
+                .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT),
+                        callback)
         callback.get()
 
         then:
@@ -191,8 +186,7 @@ class ScramSha256AuthenticationSpecification extends Specification {
         when:
         new CommandReadOperation<Document>('admin',
                 new BsonDocumentWrapper<Document>(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec())
-                .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(),
-                        IgnorableRequestContext.INSTANCE))
+                .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT))
 
         then:
         noExceptionThrown()
@@ -212,8 +206,8 @@ class ScramSha256AuthenticationSpecification extends Specification {
         when:
         new CommandReadOperation<Document>('admin',
                 new BsonDocumentWrapper<Document>(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec())
-                .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(),
-                        IgnorableRequestContext.INSTANCE), callback)
+                .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT),
+                        callback)
         callback.get()
 
         then:
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java
index 17dc3b6cfcf..0295e8c1f9f 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java
@@ -23,6 +23,7 @@
 import com.mongodb.internal.binding.AsyncConnectionSource;
 import com.mongodb.internal.selector.ServerAddressSelector;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getAsyncCluster;
 import static com.mongodb.ClusterFixture.getCluster;
 import static com.mongodb.assertions.Assertions.fail;
@@ -52,7 +53,8 @@ public static void waitForLastRelease(final Cluster cluster) {
     }
 
     public static void waitForLastRelease(final ServerAddress address, final Cluster cluster) {
-        ConcurrentPool<UsageTrackingInternalConnection> pool = getConnectionPool(address, cluster);
+        ConcurrentPool<UsageTrackingInternalConnection> pool = connectionPool(
+                cluster.selectServer(new ServerAddressSelector(address), OPERATION_CONTEXT).getServer());
         long startTime = System.currentTimeMillis();
         while (pool.getInUseCount() > 0) {
             try {
@@ -68,7 +70,7 @@ public static void waitForLastRelease(final ServerAddress address, final Cluster
     }
 
     private static ConcurrentPool<UsageTrackingInternalConnection> getConnectionPool(final ServerAddress address, final Cluster cluster) {
-        return connectionPool(cluster.selectServer(new ServerAddressSelector(address), new OperationContext()).getServer());
+        return connectionPool(cluster.selectServer(new ServerAddressSelector(address), OPERATION_CONTEXT).getServer());
     }
 
     private static void checkPool(final ServerAddress address, final Cluster cluster) {
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy
index 0f2ba70d4c0..266f4e88996 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy
@@ -34,6 +34,7 @@ import org.bson.types.ObjectId
 import java.util.concurrent.CountDownLatch
 import java.util.concurrent.TimeUnit
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY
 import static com.mongodb.ClusterFixture.getClusterConnectionMode
 import static com.mongodb.ClusterFixture.getCredentialWithCache
 import static com.mongodb.ClusterFixture.getPrimary
@@ -220,11 +221,12 @@ class ServerMonitorSpecification extends OperationFunctionalSpecification {
             }
         }
         serverMonitor = new DefaultServerMonitor(new ServerId(new ClusterId(), address), ServerSettings.builder().build(),
-                new InternalStreamConnectionFactory(SINGLE, new SocketStreamFactory(new DefaultInetAddressResolver(),
+                        new InternalStreamConnectionFactory(SINGLE, new SocketStreamFactory(new DefaultInetAddressResolver(),
                         SocketSettings.builder().connectTimeout(500, TimeUnit.MILLISECONDS).build(), getSslSettings()),
                         getCredentialWithCache(), null, null, [], LoggerSettings.builder().build(), null,
                         getServerApi()),
-                getClusterConnectionMode(), getServerApi(), false, SameObjectProvider.initialized(sdam))
+                getClusterConnectionMode(), getServerApi(), false, SameObjectProvider.initialized(sdam),
+                OPERATION_CONTEXT_FACTORY)
         serverMonitor.start()
         serverMonitor
     }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java
index e715bfb5cd1..ae7166300e8 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java
@@ -25,8 +25,6 @@
 import com.mongodb.connection.ConnectionPoolSettings;
 import com.mongodb.connection.ServerSettings;
 import com.mongodb.connection.SocketSettings;
-import com.mongodb.internal.IgnorableRequestContext;
-import com.mongodb.internal.binding.StaticBindingContext;
 import com.mongodb.internal.selector.ServerAddressSelector;
 import com.mongodb.internal.validator.NoOpFieldNameValidator;
 import org.bson.BsonDocument;
@@ -38,6 +36,8 @@
 
 import java.util.Collections;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY;
 import static com.mongodb.ClusterFixture.getCredential;
 import static com.mongodb.ClusterFixture.getDefaultDatabaseName;
 import static com.mongodb.ClusterFixture.getPrimary;
@@ -66,8 +66,7 @@ private void setUpCluster(final ServerAddress serverAddress) {
                 clusterSettings,
                 new DefaultClusterableServerFactory(ServerSettings.builder().build(),
                         ConnectionPoolSettings.builder().maxSize(1).build(), InternalConnectionPoolSettings.builder().build(),
-                        streamFactory, streamFactory, getCredential(),
-
+                        OPERATION_CONTEXT_FACTORY, streamFactory, OPERATION_CONTEXT_FACTORY, streamFactory, getCredential(),
                         LoggerSettings.builder().build(), null, null, null,
                         Collections.emptyList(), getServerApi(), false));
     }
@@ -93,7 +92,7 @@ public void shouldGetServerWithOkDescription() {
         setUpCluster(getPrimary());
 
         // when
-        ServerTuple serverTuple = cluster.selectServer(clusterDescription -> getPrimaries(clusterDescription), new OperationContext());
+        ServerTuple serverTuple = cluster.selectServer(clusterDescription -> getPrimaries(clusterDescription), OPERATION_CONTEXT);
 
         // then
         assertTrue(serverTuple.getServerDescription().isOk());
@@ -102,17 +101,16 @@ public void shouldGetServerWithOkDescription() {
     @Test
     public void shouldSuccessfullyQueryASecondaryWithPrimaryReadPreference() {
         // given
+        OperationContext operationContext = OPERATION_CONTEXT;
         ServerAddress secondary = getSecondary();
         setUpCluster(secondary);
         String collectionName = getClass().getName();
-        Connection connection = cluster.selectServer(new ServerAddressSelector(secondary), new OperationContext()).getServer()
-                .getConnection(new OperationContext());
+        Connection connection = cluster.selectServer(new ServerAddressSelector(secondary), operationContext).getServer()
+                .getConnection(operationContext);
 
         // when
         BsonDocument result = connection.command(getDefaultDatabaseName(), new BsonDocument("count", new BsonString(collectionName)),
-                new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(),
-                new StaticBindingContext(NoOpSessionContext.INSTANCE, getServerApi(), IgnorableRequestContext.INSTANCE,
-                        new OperationContext()));
+                new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), operationContext);
 
         // then
         assertEquals(new BsonDouble(1.0).intValue(), result.getNumber("ok").intValue());
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy
index ad5af2f6768..f652c2a0771 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy
@@ -30,6 +30,9 @@ import javax.net.ssl.SSLSocket
 import javax.net.ssl.SSLSocketFactory
 import java.lang.reflect.Method
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
+import static com.mongodb.ClusterFixture.createOperationContext
 import static com.mongodb.ClusterFixture.getPrimary
 import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -44,8 +47,10 @@ class SocketStreamHelperSpecification extends Specification {
                 .readTimeout(10, SECONDS)
                 .build()
 
+        def operationContext = createOperationContext(TIMEOUT_SETTINGS.withReadTimeoutMS(socketSettings.getReadTimeout(MILLISECONDS)))
+
         when:
-        SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
+        SocketStreamHelper.initialize(operationContext, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
                 socketSettings, SslSettings.builder().build())
 
         then:
@@ -78,7 +83,7 @@ class SocketStreamHelperSpecification extends Specification {
         Socket socket = SocketFactory.default.createSocket()
 
         when:
-        SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
+        SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
                 SocketSettings.builder().build(), SslSettings.builder().build())
 
         then:
@@ -94,8 +99,8 @@ class SocketStreamHelperSpecification extends Specification {
         SSLSocket socket = SSLSocketFactory.default.createSocket()
 
         when:
-        SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), SocketSettings.
-                builder().build(), sslSettings)
+        SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
+                SocketSettings.builder().build(), sslSettings)
 
         then:
         socket.getSSLParameters().endpointIdentificationAlgorithm == (sslSettings.invalidHostNameAllowed ? null : 'HTTPS')
@@ -115,7 +120,7 @@ class SocketStreamHelperSpecification extends Specification {
         SSLSocket socket = SSLSocketFactory.default.createSocket()
 
         when:
-        SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
+        SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
                 SocketSettings.builder().build(), sslSettings)
 
         then:
@@ -134,7 +139,7 @@ class SocketStreamHelperSpecification extends Specification {
         Socket socket = SocketFactory.default.createSocket()
 
         when:
-        SocketStreamHelper.initialize(socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
+        SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
                 SocketSettings.builder().build(), SslSettings.builder().enabled(true).build())
 
         then:
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy
index 7fcf694723c..42886648a2c 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy
@@ -1,18 +1,19 @@
 package com.mongodb.internal.connection
 
-import com.mongodb.spi.dns.InetAddressResolver
-import util.spock.annotations.Slow
 import com.mongodb.MongoSocketOpenException
 import com.mongodb.ServerAddress
 import com.mongodb.connection.SocketSettings
 import com.mongodb.connection.SslSettings
+import com.mongodb.spi.dns.InetAddressResolver
 import spock.lang.Ignore
 import spock.lang.IgnoreIf
 import spock.lang.Specification
+import util.spock.annotations.Slow
 
 import javax.net.SocketFactory
 import java.util.concurrent.TimeUnit
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getSslSettings
 
 class StreamSocketAddressSpecification extends Specification {
@@ -43,7 +44,7 @@ class StreamSocketAddressSpecification extends Specification {
         def socketStream = new SocketStream(serverAddress, null, socketSettings, sslSettings, socketFactory, bufferProvider)
 
         when:
-        socketStream.open()
+        socketStream.open(OPERATION_CONTEXT)
 
         then:
         !socket0.isConnected()
@@ -82,7 +83,7 @@ class StreamSocketAddressSpecification extends Specification {
         def socketStream = new SocketStream(serverAddress, inetAddressResolver, socketSettings, sslSettings, socketFactory, bufferProvider)
 
         when:
-        socketStream.open()
+        socketStream.open(OPERATION_CONTEXT)
 
         then:
         thrown(MongoSocketOpenException)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java b/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java
index c8274f382fc..704dea56f44 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java
@@ -43,6 +43,8 @@
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
 
 import static com.mongodb.ClusterFixture.TIMEOUT;
 import static com.mongodb.internal.connection.InternalStreamConnection.getSecuritySensitiveCommands;
@@ -178,29 +180,50 @@ public CommandFailedEvent getCommandFailedEvent(final String commandName) {
                 .orElseThrow(() -> new IllegalArgumentException(commandName + " not found in command failed event list"));
     }
 
-    public List<CommandEvent> getCommandStartedEvents() {
-        return getCommandStartedEvents(Integer.MAX_VALUE);
+    public List<CommandFailedEvent> getCommandFailedEvents() {
+        return getEvents(CommandFailedEvent.class, Integer.MAX_VALUE);
     }
 
-    private List<CommandEvent> getCommandStartedEvents(final int maxEvents) {
+    public List<CommandFailedEvent> getCommandFailedEvents(final String commandName) {
+        return getEvents(CommandFailedEvent.class,
+                commandEvent -> commandEvent.getCommandName().equals(commandName),
+                Integer.MAX_VALUE);
+    }
+
+    public List<CommandStartedEvent> getCommandStartedEvents() {
+        return getEvents(CommandStartedEvent.class, Integer.MAX_VALUE);
+    }
+
+    public List<CommandStartedEvent> getCommandStartedEvents(final String commandName) {
+        return getEvents(CommandStartedEvent.class,
+                commandEvent -> commandEvent.getCommandName().equals(commandName),
+                Integer.MAX_VALUE);
+    }
+
+    public List<CommandSucceededEvent> getCommandSucceededEvents() {
+        return getEvents(CommandSucceededEvent.class, Integer.MAX_VALUE);
+    }
+
+    private <T extends CommandEvent> List<T> getEvents(final Class<T> type, final int maxEvents) {
+      return getEvents(type, e -> true, maxEvents);
+    }
+
+    private <T extends CommandEvent> List<T> getEvents(final Class<T> type,
+                                                       final Predicate<? super CommandEvent> filter,
+                                                       final int maxEvents) {
         lock.lock();
         try {
-            List<CommandEvent> commandStartedEvents = new ArrayList<>();
-            for (CommandEvent cur : getEvents()) {
-                if (cur instanceof CommandStartedEvent) {
-                    commandStartedEvents.add(cur);
-                }
-                if (commandStartedEvents.size() == maxEvents) {
-                    break;
-                }
-            }
-            return commandStartedEvents;
+            return getEvents().stream()
+                    .filter(e -> e.getClass() == type)
+                    .filter(filter)
+                    .map(type::cast)
+                    .limit(maxEvents).collect(Collectors.toList());
         } finally {
             lock.unlock();
         }
     }
 
-    public List<CommandEvent> waitForStartedEvents(final int numEvents) {
+    public List<CommandStartedEvent> waitForStartedEvents(final int numEvents) {
         lock.lock();
         try {
             while (!hasCompletedEvents(numEvents)) {
@@ -212,7 +235,7 @@ public List<CommandEvent> waitForStartedEvents(final int numEvents) {
                     throw interruptAndCreateMongoInterruptedException("Interrupted waiting for event", e);
                 }
             }
-            return getCommandStartedEvents(numEvents);
+            return getEvents(CommandStartedEvent.class, numEvents);
         } finally {
             lock.unlock();
         }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy
index 8477a91cc43..a3e309a1f5f 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy
@@ -16,7 +16,7 @@
 
 package com.mongodb.internal.operation
 
-import com.mongodb.MongoExecutionTimeoutException
+
 import com.mongodb.MongoNamespace
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ReadConcern
@@ -51,9 +51,8 @@ import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
 import static TestOperationHelper.getKeyPattern
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.collectCursorResults
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.getAsyncCluster
 import static com.mongodb.ClusterFixture.getBinding
@@ -67,8 +66,6 @@ import static com.mongodb.connection.ServerType.STANDALONE
 import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand
 import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION
-import static java.util.concurrent.TimeUnit.MILLISECONDS
-import static java.util.concurrent.TimeUnit.SECONDS
 
 class AggregateOperationSpecification extends OperationFunctionalSpecification {
 
@@ -87,8 +84,6 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
         operation.getAllowDiskUse() == null
         operation.getBatchSize() == null
         operation.getCollation() == null
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
-        operation.getMaxTime(MILLISECONDS) == 0
         operation.getPipeline() == []
     }
 
@@ -102,15 +97,11 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
                 .batchSize(10)
                 .collation(defaultCollation)
                 .hint(hint)
-                .maxAwaitTime(10, MILLISECONDS)
-                .maxTime(10, MILLISECONDS)
 
         then:
         operation.getAllowDiskUse()
         operation.getBatchSize() == 10
         operation.getCollation() == defaultCollation
-        operation.getMaxAwaitTime(MILLISECONDS) == 10
-        operation.getMaxTime(MILLISECONDS) == 10
         operation.getHint() == hint
     }
 
@@ -142,18 +133,25 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
         when:
         def pipeline = [new BsonDocument('$match', new BsonDocument('a', new BsonString('A')))]
         def operation = new AggregateOperation<Document>(helper.namespace, pipeline, new DocumentCodec())
+
+        def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName()))
+                .append('pipeline', new BsonArray(pipeline))
+                .append('cursor', new BsonDocument())
+
+        then:
+        testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult)
+
+        when:
+        operation = new AggregateOperation<Document>(helper.namespace, pipeline, new DocumentCodec())
                 .allowDiskUse(true)
                 .batchSize(10)
                 .collation(defaultCollation)
-                .maxAwaitTime(15, MILLISECONDS)
-                .maxTime(10, MILLISECONDS)
 
-        def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName()))
+        expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName()))
                 .append('pipeline', new BsonArray(pipeline))
                 .append('allowDiskUse', new BsonBoolean(true))
                 .append('collation', defaultCollation.asDocument())
                 .append('cursor', new BsonDocument('batchSize', new BsonInt32(10)))
-                .append('maxTimeMS', new BsonInt32(10))
 
         then:
         testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult)
@@ -244,7 +242,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
         results.containsAll(['Pete', 'Sam'])
 
         cleanup:
-        new DropCollectionOperation(viewNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding(getCluster()))
+        new DropCollectionOperation(viewNamespace, WriteConcern.ACKNOWLEDGED)
+                .execute(getBinding(getCluster()))
 
         where:
         async << [true, false]
@@ -267,7 +266,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should allow disk usage'() {
         when:
-        AggregateOperation operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec()).allowDiskUse(allowDiskUse)
+        AggregateOperation operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec())
+                .allowDiskUse(allowDiskUse)
         def cursor = operation.execute(getBinding())
 
         then:
@@ -279,7 +279,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should allow batch size'() {
         when:
-        AggregateOperation operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec()).batchSize(batchSize)
+        AggregateOperation operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec())
+                .batchSize(batchSize)
         def cursor = operation.execute(getBinding())
 
         then:
@@ -289,25 +290,6 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
         batchSize << [null, 0, 10]
     }
 
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        def operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec()).maxTime(1, SECONDS)
-        enableMaxTimeFailPoint()
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        async << [true, false]
-    }
-
     @IgnoreIf({ serverVersionLessThan(3, 6) })
     def 'should be able to explain an empty pipeline'() {
         given:
@@ -367,8 +349,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
     def 'should apply comment'() {
         given:
         def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile'))
-        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec())
-                .execute(getBinding())
+        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)),
+                new BsonDocumentCodec()).execute(getBinding())
         def expectedComment = 'this is a comment'
         def operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec())
                 .comment(new BsonString(expectedComment))
@@ -381,50 +363,30 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
         ((Document) profileDocument.get('command')).get('comment') == expectedComment
 
         cleanup:
-        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec())
-                .execute(getBinding())
+        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)),
+                new BsonDocumentCodec()).execute(getBinding())
         profileCollectionHelper.drop()
 
         where:
         async << [true, false]
     }
 
-    @IgnoreIf({ isSharded() || serverVersionLessThan(3, 2) })
-    def 'should be able to respect maxTime with pipeline'() {
-        given:
-        enableMaxTimeFailPoint()
-        AggregateOperation operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec())
-                .maxTime(10, MILLISECONDS)
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        async << [true, false]
-    }
-
     def 'should add read concern to command'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(ReadBinding)
         def source = Stub(ConnectionSource)
         def connection = Mock(Connection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.readConnectionSource >> source
-        binding.sessionContext >> sessionContext
         source.connection >> connection
         source.retain() >> source
-        source.getServerApi() >> null
+        source.operationContext >> operationContext
         def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName()))
                 .append('pipeline', new BsonArray())
                 .append('cursor', new BsonDocument())
-        appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument)
+        appendReadConcernToCommand(operationContext.getSessionContext(), MIN_WIRE_VERSION, commandDocument)
 
         def operation = new AggregateOperation<Document>(getNamespace(), [], new DocumentCodec())
 
@@ -434,7 +396,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.command(_, commandDocument, _, _, _, binding) >>
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
                 new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                         .append('ns', new BsonString(getNamespace().getFullName()))
                         .append('firstBatch', new BsonArrayWrapper([])))
@@ -453,14 +415,13 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should add read concern to command asynchronously'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(AsyncReadBinding)
         def source = Stub(AsyncConnectionSource)
         def connection = Mock(AsyncConnection)
-        binding.serverApi >> null
-        binding.readPreference >> ReadPreference.primary()
+        binding.operationContext >> operationContext
         binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
-        binding.sessionContext >> sessionContext
-        source.serverApi >> null
+        source.operationContext >> operationContext
         source.getConnection(_) >> { it[0].onResult(connection, null) }
         source.retain() >> source
         def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName()))
@@ -476,7 +437,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> {
+        1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> {
             it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                     .append('ns', new BsonString(getNamespace().getFullName()))
                     .append('firstBatch', new BsonArrayWrapper([]))), null)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy
index a7aa377e855..496e7311949 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy
@@ -17,7 +17,6 @@
 package com.mongodb.internal.operation
 
 import com.mongodb.MongoCommandException
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNamespace
 import com.mongodb.MongoWriteConcernException
 import com.mongodb.OperationFunctionalSpecification
@@ -29,6 +28,7 @@ import com.mongodb.client.model.CreateCollectionOptions
 import com.mongodb.client.model.Filters
 import com.mongodb.client.model.ValidationOptions
 import com.mongodb.client.test.CollectionHelper
+import com.mongodb.internal.client.model.AggregationLevel
 import org.bson.BsonArray
 import org.bson.BsonBoolean
 import org.bson.BsonDocument
@@ -40,17 +40,12 @@ import org.bson.codecs.BsonValueCodecProvider
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.getBinding
 import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet
 import static com.mongodb.ClusterFixture.isSharded
 import static com.mongodb.ClusterFixture.serverVersionLessThan
 import static com.mongodb.WriteConcern.ACKNOWLEDGED
 import static com.mongodb.client.model.Filters.gte
-import static java.util.concurrent.TimeUnit.MILLISECONDS
-import static java.util.concurrent.TimeUnit.SECONDS
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders
 
 class AggregateToCollectionOperationSpecification extends OperationFunctionalSpecification {
@@ -71,11 +66,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
         def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))]
 
         when:
-        AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ACKNOWLEDGED)
+        AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED)
 
         then:
         operation.getAllowDiskUse() == null
-        operation.getMaxTime(MILLISECONDS) == 0
         operation.getPipeline() == pipeline
         operation.getBypassDocumentValidation() == null
         operation.getWriteConcern() == ACKNOWLEDGED
@@ -87,15 +81,14 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
         def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))]
 
         when:
-        AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, WriteConcern.MAJORITY)
+        AggregateToCollectionOperation operation =
+                createOperation(getNamespace(), pipeline, WriteConcern.MAJORITY)
                 .allowDiskUse(true)
-                .maxTime(10, MILLISECONDS)
                 .bypassDocumentValidation(true)
                 .collation(defaultCollation)
 
         then:
         operation.getAllowDiskUse()
-        operation.getMaxTime(MILLISECONDS) == 10
         operation.getBypassDocumentValidation() == true
         operation.getWriteConcern() == WriteConcern.MAJORITY
         operation.getCollation() == defaultCollation
@@ -106,15 +99,13 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
         def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))]
 
         when:
-        AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ReadConcern.DEFAULT)
+        AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ReadConcern.DEFAULT)
                 .allowDiskUse(true)
-                .maxTime(10, MILLISECONDS)
                 .bypassDocumentValidation(true)
                 .collation(defaultCollation)
 
         then:
         operation.getAllowDiskUse()
-        operation.getMaxTime(MILLISECONDS) == 10
         operation.getBypassDocumentValidation() == true
         operation.getReadConcern() == ReadConcern.DEFAULT
         operation.getCollation() == defaultCollation
@@ -122,7 +113,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
 
     def 'should not accept an empty pipeline'() {
         when:
-        new AggregateToCollectionOperation(getNamespace(), [], ACKNOWLEDGED)
+        createOperation(getNamespace(), [], ACKNOWLEDGED)
 
 
         then:
@@ -131,10 +122,9 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
 
     def 'should be able to output to a collection'() {
         when:
-        AggregateToCollectionOperation operation =
-                new AggregateToCollectionOperation(getNamespace(),
-                                                   [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))],
-                        ACKNOWLEDGED)
+        AggregateToCollectionOperation operation = createOperation(getNamespace(),
+                [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))],
+                ACKNOWLEDGED)
         execute(operation, async)
 
         then:
@@ -147,9 +137,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
     @IgnoreIf({ serverVersionLessThan(4, 2) })
     def 'should be able to merge into a collection'() {
         when:
-        AggregateToCollectionOperation operation =
-                new AggregateToCollectionOperation(getNamespace(),
-                        [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))])
+        AggregateToCollectionOperation operation = createOperation(getNamespace(),
+                [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))])
         execute(operation, async)
 
         then:
@@ -161,11 +150,9 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
 
     def 'should be able to match then output to a collection'() {
         when:
-        AggregateToCollectionOperation operation =
-                new AggregateToCollectionOperation(getNamespace(),
-                                                   [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))),
-                                                    new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))],
-                        ACKNOWLEDGED)
+        AggregateToCollectionOperation operation = createOperation(getNamespace(),
+                [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))),
+                 new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], ACKNOWLEDGED)
         execute(operation, async)
 
         then:
@@ -175,39 +162,15 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
         async << [true, false]
     }
 
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        AggregateToCollectionOperation operation =
-                new AggregateToCollectionOperation(getNamespace(),
-                                                   [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))),
-                                                    new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))],
-                        ACKNOWLEDGED)
-                        .maxTime(1, SECONDS)
-        enableMaxTimeFailPoint()
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        async << [true, false]
-    }
-
     @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() })
     def 'should throw on write concern error'() {
         given:
-        AggregateToCollectionOperation operation =
-                new AggregateToCollectionOperation(getNamespace(),
+        AggregateToCollectionOperation operation = createOperation(getNamespace(),
                         [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))],
                         new WriteConcern(5))
 
         when:
-        async ? executeAsync(operation) : operation.execute(getBinding())
+        execute(operation, async)
 
         then:
         def ex = thrown(MongoWriteConcernException)
@@ -227,8 +190,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
         getCollectionHelper().insertDocuments(BsonDocument.parse('{ level: 9 }'))
 
         when:
-        def operation = new AggregateToCollectionOperation(getNamespace(), [BsonDocument.parse('{$out: "collectionOut"}')],
-                ACKNOWLEDGED)
+        AggregateToCollectionOperation operation = createOperation(getNamespace(),
+                [BsonDocument.parse('{$out: "collectionOut"}')], ACKNOWLEDGED)
         execute(operation, async)
 
         then:
@@ -256,7 +219,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
     def 'should create the expected command'() {
         when:
         def pipeline = [BsonDocument.parse('{$out: "collectionOut"}')]
-        def operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ReadConcern.MAJORITY, WriteConcern.MAJORITY)
+        AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline,
+                ReadConcern.MAJORITY, WriteConcern.MAJORITY)
                 .bypassDocumentValidation(true)
         def expectedCommand = new BsonDocument('aggregate', new BsonString(getNamespace().getCollectionName()))
                 .append('pipeline', new BsonArray(pipeline))
@@ -298,7 +262,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
         getCollectionHelper().insertDocuments(BsonDocument.parse('{_id: 1, str: "foo"}'))
         def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}'),
                         new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))]
-        def operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ACKNOWLEDGED).collation(defaultCollation)
+        AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED)
                 .collation(caseInsensitiveCollation)
 
         when:
@@ -315,10 +279,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
     def 'should apply comment'() {
         given:
         def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile'))
-        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec())
-                .execute(getBinding())
+        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)),
+                new BsonDocumentCodec()).execute(getBinding())
         def expectedComment = 'this is a comment'
-        def operation = new AggregateToCollectionOperation(getNamespace(),
+        AggregateToCollectionOperation operation = createOperation(getNamespace(),
                 [Aggregates.out('outputCollection').toBsonDocument(BsonDocument, registry)], ACKNOWLEDGED)
                 .comment(new BsonString(expectedComment))
 
@@ -330,11 +294,24 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe
         ((Document) profileDocument.get('command')).get('comment') == expectedComment
 
         cleanup:
-        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec())
-                .execute(getBinding())
+        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)),
+                new BsonDocumentCodec()).execute(getBinding())
         profileCollectionHelper.drop()
 
         where:
         async << [true, false]
     }
+
+    def createOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline) {
+        new AggregateToCollectionOperation(namespace, pipeline, null, null, AggregationLevel.COLLECTION)
+    }
+
+    def createOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline, final WriteConcern writeConcern) {
+        new AggregateToCollectionOperation(namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION)
+    }
+
+    def createOperation(final MongoNamespace namespace, final List<BsonDocument> pipeline, final ReadConcern readConcern) {
+        new AggregateToCollectionOperation(namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION)
+    }
+
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java
index 3b8addf6596..93449a6558b 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java
@@ -21,6 +21,7 @@
 import com.mongodb.MongoQueryException;
 import com.mongodb.ReadPreference;
 import com.mongodb.ServerCursor;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.OperationTest;
 import com.mongodb.internal.binding.AsyncConnectionSource;
@@ -105,7 +106,7 @@ void cleanup() {
     @DisplayName("server cursor should not be null")
     void theServerCursorShouldNotBeNull() {
         BsonDocument commandResult = executeFindCommand(2);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertNotNull(cursor.getServerCursor());
@@ -115,7 +116,7 @@ void theServerCursorShouldNotBeNull() {
     @DisplayName("should get Exceptions for operations on the cursor after closing")
     void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() {
         BsonDocument commandResult = executeFindCommand(5);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         cursor.close();
@@ -130,7 +131,7 @@ void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() {
     @DisplayName("should throw an Exception when going off the end")
     void shouldThrowAnExceptionWhenGoingOffTheEnd() {
         BsonDocument commandResult = executeFindCommand(2, 1);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         cursorNext();
@@ -144,7 +145,7 @@ void shouldThrowAnExceptionWhenGoingOffTheEnd() {
     @DisplayName("test normal exhaustion")
     void testNormalExhaustion() {
         BsonDocument commandResult = executeFindCommand();
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertEquals(10, cursorFlatten().size());
@@ -155,7 +156,7 @@ void testNormalExhaustion() {
     @DisplayName("test limit exhaustion")
     void testLimitExhaustion(final int limit, final int batchSize, final int expectedTotal) {
         BsonDocument commandResult = executeFindCommand(limit, batchSize);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, batchSize, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, batchSize, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
 
@@ -174,7 +175,7 @@ void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, fi
 
         BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
                 new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, awaitData);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertFalse(cursor.isClosed());
@@ -197,7 +198,7 @@ void testTailableInterrupt() throws InterruptedException {
 
         BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
                 new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         CountDownLatch latch = new CountDownLatch(1);
@@ -230,7 +231,7 @@ void testTailableInterrupt() throws InterruptedException {
     void shouldKillCursorIfLimitIsReachedOnInitialQuery() {
         assumeFalse(isSharded());
         BsonDocument commandResult = executeFindCommand(5, 10);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertNotNull(cursorNext());
@@ -243,7 +244,7 @@ void shouldKillCursorIfLimitIsReachedOnInitialQuery() {
     void shouldKillCursorIfLimitIsReachedOnGetMore() {
         assumeFalse(isSharded());
         BsonDocument commandResult = executeFindCommand(5, 3);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         ServerCursor serverCursor = cursor.getServerCursor();
@@ -261,8 +262,9 @@ void shouldKillCursorIfLimitIsReachedOnGetMore() {
     @DisplayName("should release connection source if limit is reached on initial query")
     void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() {
         assumeFalse(isSharded());
+
         BsonDocument commandResult = executeFindCommand(5, 10);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1));
@@ -275,7 +277,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() {
     void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() {
         assumeFalse(isSharded());
         BsonDocument commandResult = executeFindCommand(5, 3);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertNotNull(cursorNext());
@@ -288,7 +290,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() {
     @DisplayName("test limit with get more")
     void testLimitWithGetMore() {
         BsonDocument commandResult = executeFindCommand(5, 2);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertNotNull(cursorNext());
@@ -311,7 +313,7 @@ void testLimitWithLargeDocuments() {
         );
 
         BsonDocument commandResult = executeFindCommand(300, 0);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertEquals(300, cursorFlatten().size());
@@ -321,7 +323,7 @@ void testLimitWithLargeDocuments() {
     @DisplayName("should respect batch size")
     void shouldRespectBatchSize() {
         BsonDocument commandResult = executeFindCommand(2);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         assertEquals(2, cursor.getBatchSize());
@@ -338,7 +340,7 @@ void shouldRespectBatchSize() {
     @DisplayName("should throw cursor not found exception")
     void shouldThrowCursorNotFoundException() throws Throwable {
         BsonDocument commandResult = executeFindCommand(2);
-        cursor = new AsyncCommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                                null, connectionSource, connection);
 
         ServerCursor serverCursor = cursor.getServerCursor();
@@ -347,7 +349,7 @@ void shouldThrowCursorNotFoundException() throws Throwable {
         this.<BsonDocument>block(cb -> localConnection.commandAsync(getNamespace().getDatabaseName(),
                 new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName()))
                         .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))),
-                NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource, cb));
+                NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext(), cb));
         localConnection.release();
 
         cursorNext();
@@ -412,9 +414,8 @@ private BsonDocument executeFindCommand(final BsonDocument filter, final int lim
         }
 
         BsonDocument results = block(cb -> connection.commandAsync(getDatabaseName(), findCommand,
-                NO_OP_FIELD_NAME_VALIDATOR, readPreference,
-                CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH),
-                connectionSource, cb));
+                NO_OP_FIELD_NAME_VALIDATOR, readPreference, CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH),
+                connectionSource.getOperationContext(), cb));
 
         assertNotNull(results);
         return results;
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java
new file mode 100644
index 00000000000..53b2d78eae2
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation;
+
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketException;
+import com.mongodb.ServerAddress;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.connection.ServerType;
+import com.mongodb.connection.ServerVersion;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.async.SingleResultCallback;
+import com.mongodb.internal.binding.AsyncConnectionSource;
+import com.mongodb.internal.connection.AsyncConnection;
+import com.mongodb.internal.connection.OperationContext;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.codecs.Decoder;
+import org.bson.codecs.DocumentCodec;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+class AsyncCommandBatchCursorTest {
+
+    private static final MongoNamespace NAMESPACE = new MongoNamespace("test", "test");
+    private static final BsonInt64 CURSOR_ID = new BsonInt64(1);
+    private static final BsonDocument COMMAND_CURSOR_DOCUMENT = new BsonDocument("ok", new BsonInt32(1))
+            .append("cursor",
+                    new BsonDocument("ns", new BsonString(NAMESPACE.getFullName()))
+                            .append("id", CURSOR_ID)
+                            .append("firstBatch", new BsonArrayWrapper<>(new BsonArray())));
+
+    private static final Decoder<Document> DOCUMENT_CODEC = new DocumentCodec();
+
+
+    private AsyncConnection mockConnection;
+    private ConnectionDescription mockDescription;
+    private AsyncConnectionSource connectionSource;
+    private OperationContext operationContext;
+    private TimeoutContext timeoutContext;
+    private ServerDescription serverDescription;
+
+    @BeforeEach
+    void setUp() {
+        ServerVersion serverVersion = new ServerVersion(3, 6);
+
+        mockConnection = mock(AsyncConnection.class, "connection");
+        mockDescription = mock(ConnectionDescription.class);
+        when(mockDescription.getMaxWireVersion()).thenReturn(getMaxWireVersionForServerVersion(serverVersion.getVersionList()));
+        when(mockDescription.getServerType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(mockConnection.getDescription()).thenReturn(mockDescription);
+        when(mockConnection.retain()).thenReturn(mockConnection);
+
+        connectionSource = mock(AsyncConnectionSource.class);
+        operationContext = mock(OperationContext.class);
+        timeoutContext = mock(TimeoutContext.class);
+        serverDescription = mock(ServerDescription.class);
+        when(operationContext.getTimeoutContext()).thenReturn(timeoutContext);
+        when(connectionSource.getOperationContext()).thenReturn(operationContext);
+        doAnswer(invocation -> {
+            SingleResultCallback<AsyncConnection> callback = invocation.getArgument(0);
+            callback.onResult(mockConnection, null);
+            return null;
+        }).when(connectionSource).getConnection(any());
+        when(connectionSource.getServerDescription()).thenReturn(serverDescription);
+    }
+
+
+    @Test
+    void shouldSkipKillsCursorsCommandWhenNetworkErrorOccurs() {
+        //given
+        doAnswer(invocation -> {
+            SingleResultCallback<Object> argument = invocation.getArgument(6);
+            argument.onResult(null, new MongoSocketException("test", new ServerAddress()));
+            return null;
+        }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any());
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+        AsyncCommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+
+        //when
+        commandBatchCursor.next((result, t) -> {
+            Assertions.assertNull(result);
+            Assertions.assertNotNull(t);
+            Assertions.assertEquals(MongoSocketException.class, t.getClass());
+        });
+
+        //then
+        commandBatchCursor.close();
+        verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any());
+    }
+
+
+    @Test
+    void shouldNotSkipKillsCursorsCommandWhenTimeoutExceptionDoesNotHaveNetworkErrorCause() {
+        //given
+        doAnswer(invocation -> {
+            SingleResultCallback<Object> argument = invocation.getArgument(6);
+            argument.onResult(null, new MongoOperationTimeoutException("test"));
+            return null;
+        }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any());
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(timeoutContext.hasTimeoutMS()).thenReturn(true);
+
+        AsyncCommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+
+        //when
+        commandBatchCursor.next((result, t) -> {
+            Assertions.assertNull(result);
+            Assertions.assertNotNull(t);
+            Assertions.assertEquals(MongoOperationTimeoutException.class, t.getClass());
+        });
+
+        commandBatchCursor.close();
+
+
+        //then
+        verify(mockConnection, times(2)).commandAsync(any(),
+                any(), any(), any(), any(), any(), any());
+        verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any(), any());
+        verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any(), any());
+    }
+
+    @Test
+    void shouldSkipKillsCursorsCommandWhenTimeoutExceptionHaveNetworkErrorCause() {
+        //given
+        doAnswer(invocation -> {
+            SingleResultCallback<Object> argument = invocation.getArgument(6);
+            argument.onResult(null, new MongoOperationTimeoutException("test", new MongoSocketException("test", new ServerAddress())));
+            return null;
+        }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any());
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(timeoutContext.hasTimeoutMS()).thenReturn(true);
+
+        AsyncCommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+
+        //when
+        commandBatchCursor.next((result, t) -> {
+            Assertions.assertNull(result);
+            Assertions.assertNotNull(t);
+            Assertions.assertEquals(MongoOperationTimeoutException.class, t.getClass());
+        });
+
+        commandBatchCursor.close();
+
+        //then
+        verify(mockConnection, times(1)).commandAsync(any(),
+                any(), any(), any(), any(), any(), any());
+        verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any(), any());
+        verify(mockConnection, never()).commandAsync(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any(), any());
+    }
+
+
+    private AsyncCommandBatchCursor<Document> createBatchCursor() {
+        return new AsyncCommandBatchCursor<Document>(
+                TimeoutMode.CURSOR_LIFETIME,
+                COMMAND_CURSOR_DOCUMENT,
+                0,
+                0,
+                DOCUMENT_CODEC,
+                null,
+                connectionSource,
+                mockConnection);
+    }
+
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy
index 129289bfbba..34187b34e62 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy
@@ -52,6 +52,7 @@ import org.bson.codecs.DocumentCodec
 import org.bson.codecs.ValueCodecProvider
 import spock.lang.IgnoreIf
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.getAsyncCluster
 import static com.mongodb.ClusterFixture.getCluster
 import static com.mongodb.ClusterFixture.isStandalone
@@ -60,7 +61,6 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan
 import static com.mongodb.client.model.changestream.ChangeStreamDocument.createCodec
 import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease
 import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion
-import static java.util.concurrent.TimeUnit.MILLISECONDS
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders
 
 @IgnoreIf({ !(serverVersionAtLeast(3, 6) && !isStandalone()) })
@@ -68,32 +68,29 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
 
     def 'should have the correct defaults'() {
         when:
-        ChangeStreamOperation operation = new ChangeStreamOperation<Document>(getNamespace(), FullDocument.DEFAULT,
-                FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec())
+        ChangeStreamOperation operation = new ChangeStreamOperation<Document>(getNamespace(),
+                FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec())
 
         then:
         operation.getBatchSize() == null
         operation.getCollation() == null
         operation.getFullDocument() == FullDocument.DEFAULT
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
         operation.getPipeline() == []
         operation.getStartAtOperationTime() == null
     }
 
     def 'should set optional values correctly'() {
         when:
-        ChangeStreamOperation operation = new ChangeStreamOperation<Document>(getNamespace(), FullDocument.UPDATE_LOOKUP,
-                FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec())
+        ChangeStreamOperation operation = new ChangeStreamOperation<Document>(getNamespace(),
+                FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec())
                 .batchSize(5)
                 .collation(defaultCollation)
-                .maxAwaitTime(15, MILLISECONDS)
                 .startAtOperationTime(new BsonTimestamp(99))
 
         then:
         operation.getBatchSize() == 5
         operation.getCollation() == defaultCollation
         operation.getFullDocument() == FullDocument.UPDATE_LOOKUP
-        operation.getMaxAwaitTime(MILLISECONDS) == 15
         operation.getStartAtOperationTime() == new BsonTimestamp(99)
     }
 
@@ -115,10 +112,9 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
                 .append('firstBatch', new BsonArrayWrapper([])))
 
         def operation = new ChangeStreamOperation<Document>(namespace, FullDocument.DEFAULT,
-                FullDocumentBeforeChange.DEFAULT, pipeline, new DocumentCodec(), changeStreamLevel)
+                FullDocumentBeforeChange.DEFAULT, pipeline, new DocumentCodec(), changeStreamLevel as ChangeStreamLevel)
                 .batchSize(5)
                 .collation(defaultCollation)
-                .maxAwaitTime(15, MILLISECONDS)
                 .startAtOperationTime(new BsonTimestamp())
 
         def expectedCommand = new BsonDocument('aggregate', aggregate)
@@ -390,8 +386,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
         def helper = getHelper()
 
         def pipeline = [BsonDocument.parse('{$match: {operationType: "rename"}}')]
-        def operation = new ChangeStreamOperation<ChangeStreamDocument>(helper.getNamespace(), FullDocument.UPDATE_LOOKUP,
-                FullDocumentBeforeChange.DEFAULT, pipeline,
+        def operation = new ChangeStreamOperation<ChangeStreamDocument>(helper.getNamespace(),
+                FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline,
                 createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider())))
         def newNamespace = new MongoNamespace('JavaDriverTest', 'newCollectionName')
         helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }'))
@@ -625,8 +621,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
     def 'should support hasNext on the sync API'() {
         given:
         def helper = getHelper()
-        def operation = new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange
-                .DEFAULT, [], CODEC)
+        def operation = new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT,
+                FullDocumentBeforeChange.DEFAULT, [], CODEC)
 
         when:
         def cursor = execute(operation, false)
@@ -642,15 +638,16 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
 
     def 'should set the startAtOperationTime on the sync cursor'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(
+                Stub(SessionContext) {
+                    getReadConcern() >> ReadConcern.DEFAULT
+                    getOperationTime() >> new BsonTimestamp()
+                })
         def changeStream
         def binding = Stub(ReadBinding) {
-            getSessionContext() >> Stub(SessionContext) {
-                getReadConcern() >> ReadConcern.DEFAULT
-                getOperationTime() >> new BsonTimestamp()
-            }
-            getServerApi() >> null
+            getOperationContext() >> operationContext
             getReadConnectionSource() >> Stub(ConnectionSource) {
-                getServerApi() >> null
+                getOperationContext() >> operationContext
                 getConnection() >> Stub(Connection) {
                      command(*_) >> {
                          changeStream = getChangeStream(it[1])
@@ -666,7 +663,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
         }
 
         when: 'set resumeAfter'
-        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC)
+        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT,
+                FullDocumentBeforeChange.DEFAULT, [], CODEC)
                 .resumeAfter(new BsonDocument())
                 .execute(binding)
 
@@ -675,7 +673,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
         !changeStream.containsKey('startAtOperationTime')
 
         when: 'set startAfter'
-        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC)
+        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT,
+                FullDocumentBeforeChange.DEFAULT, [], CODEC)
                 .startAfter(new BsonDocument())
                 .execute(binding)
 
@@ -685,7 +684,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
 
         when: 'set startAtOperationTime'
         def startAtTime = new BsonTimestamp(42)
-        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC)
+        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT,
+                FullDocumentBeforeChange.DEFAULT, [], CODEC)
                 .startAtOperationTime(startAtTime)
                 .execute(binding)
 
@@ -695,16 +695,17 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
 
     def 'should set the startAtOperationTime on the async cursor'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(
+                Stub(SessionContext) {
+                    getReadConcern() >> ReadConcern.DEFAULT
+                    getOperationTime() >> new BsonTimestamp()
+                })
         def changeStream
         def binding = Stub(AsyncReadBinding) {
-            getServerApi() >> null
-            getSessionContext() >> Stub(SessionContext) {
-                getReadConcern() >> ReadConcern.DEFAULT
-                getOperationTime() >> new BsonTimestamp()
-            }
+            getOperationContext() >> operationContext
             getReadConnectionSource(_) >> {
                 it.last().onResult(Stub(AsyncConnectionSource) {
-                    getServerApi() >> null
+                    getOperationContext() >> operationContext
                     getConnection(_) >> {
                         it.last().onResult(Stub(AsyncConnection) {
                             commandAsync(*_) >> {
@@ -723,7 +724,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
         }
 
         when: 'set resumeAfter'
-        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC)
+        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT,
+                FullDocumentBeforeChange.DEFAULT, [], CODEC)
                 .resumeAfter(new BsonDocument())
                 .executeAsync(binding, Stub(SingleResultCallback))
 
@@ -732,7 +734,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
         !changeStream.containsKey('startAtOperationTime')
 
         when: 'set startAfter'
-        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC)
+        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT,
+                FullDocumentBeforeChange.DEFAULT, [], CODEC)
                 .startAfter(new BsonDocument())
                 .executeAsync(binding, Stub(SingleResultCallback))
 
@@ -742,7 +745,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio
 
         when: 'set startAtOperationTime'
         def startAtTime = new BsonTimestamp(42)
-        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC)
+        new ChangeStreamOperation<BsonDocument>(helper.getNamespace(), FullDocument.DEFAULT,
+                FullDocumentBeforeChange.DEFAULT, [], CODEC)
                 .startAtOperationTime(startAtTime)
                 .executeAsync(binding, Stub(SingleResultCallback))
 
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java
index 30a74443633..7b9fd7b4e57 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java
@@ -16,11 +16,11 @@
 
 package com.mongodb.internal.operation;
 
-
 import com.mongodb.MongoCursorNotFoundException;
 import com.mongodb.MongoQueryException;
 import com.mongodb.ReadPreference;
 import com.mongodb.ServerCursor;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.OperationTest;
 import com.mongodb.internal.binding.ConnectionSource;
@@ -104,7 +104,7 @@ void cleanup() {
     @DisplayName("server cursor should not be null")
     void theServerCursorShouldNotBeNull() {
         BsonDocument commandResult = executeFindCommand(2);
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertNotNull(cursor.getServerCursor());
@@ -114,7 +114,7 @@ void theServerCursorShouldNotBeNull() {
     @DisplayName("test server address should not be null")
     void theServerAddressShouldNotNull() {
         BsonDocument commandResult = executeFindCommand();
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertNotNull(cursor.getServerAddress());
@@ -124,7 +124,7 @@ void theServerAddressShouldNotNull() {
     @DisplayName("should get Exceptions for operations on the cursor after closing")
     void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() {
         BsonDocument commandResult = executeFindCommand();
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         cursor.close();
@@ -139,7 +139,7 @@ void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() {
     @DisplayName("should throw an Exception when going off the end")
     void shouldThrowAnExceptionWhenGoingOffTheEnd() {
         BsonDocument commandResult = executeFindCommand(1);
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         cursor.next();
@@ -151,7 +151,7 @@ void shouldThrowAnExceptionWhenGoingOffTheEnd() {
     @DisplayName("test cursor remove")
     void testCursorRemove() {
         BsonDocument commandResult = executeFindCommand();
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertThrows(UnsupportedOperationException.class, () -> cursor.remove());
@@ -161,7 +161,7 @@ void testCursorRemove() {
     @DisplayName("test normal exhaustion")
     void testNormalExhaustion() {
         BsonDocument commandResult = executeFindCommand();
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertEquals(10, cursorFlatten().size());
@@ -172,7 +172,7 @@ void testNormalExhaustion() {
     @DisplayName("test limit exhaustion")
     void testLimitExhaustion(final int limit, final int batchSize, final int expectedTotal) {
         BsonDocument commandResult = executeFindCommand(limit, batchSize);
-        cursor = new CommandBatchCursor<>(commandResult, batchSize, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, batchSize, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertEquals(expectedTotal, cursorFlatten().size());
@@ -191,7 +191,7 @@ void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, fi
 
         BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
                 new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, awaitData);
-        cursor = new CommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertTrue(cursor.hasNext());
@@ -214,10 +214,9 @@ void testTryNextWithTailable() {
 
         BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
                 new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
-        cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
-
         List<Document> nextBatch = cursor.tryNext();
         assertNotNull(nextBatch);
         assertEquals(1, nextBatch.get(0).get("_id"));
@@ -241,7 +240,7 @@ void hasNextShouldThrowWhenCursorIsClosedInAnotherThread() throws InterruptedExc
 
         BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
                 new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
-        cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertTrue(cursor.hasNext());
@@ -268,10 +267,9 @@ void testMaxTimeMS() {
         long maxTimeMS = 500;
         BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
                 new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
-        cursor = new CommandBatchCursor<>(commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
-
         List<Document> nextBatch = cursor.tryNext();
         assertNotNull(nextBatch);
 
@@ -293,7 +291,7 @@ void testTailableInterrupt() throws InterruptedException {
 
         BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
                 new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
-        cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         CountDownLatch latch = new CountDownLatch(1);
@@ -326,7 +324,7 @@ void testTailableInterrupt() throws InterruptedException {
     void shouldKillCursorIfLimitIsReachedOnInitialQuery() {
         assumeFalse(isSharded());
         BsonDocument commandResult = executeFindCommand(5, 10);
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertNotNull(cursor.next());
@@ -339,7 +337,7 @@ void shouldKillCursorIfLimitIsReachedOnInitialQuery() {
     void shouldKillCursorIfLimitIsReachedOnGetMore() {
         assumeFalse(isSharded());
         BsonDocument commandResult = executeFindCommand(5, 3);
-        cursor = new CommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         ServerCursor serverCursor = cursor.getServerCursor();
@@ -358,7 +356,7 @@ void shouldKillCursorIfLimitIsReachedOnGetMore() {
     void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() {
         assumeFalse(isSharded());
         BsonDocument commandResult = executeFindCommand(5, 10);
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertNull(cursor.getServerCursor());
@@ -371,7 +369,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() {
     void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() {
         assumeFalse(isSharded());
         BsonDocument commandResult = executeFindCommand(5, 3);
-        cursor = new CommandBatchCursor<>(commandResult, 3, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertNotNull(cursor.next());
@@ -384,7 +382,7 @@ void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() {
     @DisplayName("test limit with get more")
     void testLimitWithGetMore() {
         BsonDocument commandResult = executeFindCommand(5, 2);
-        cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertNotNull(cursor.next());
@@ -405,7 +403,7 @@ void testLimitWithLargeDocuments() {
         );
 
         BsonDocument commandResult = executeFindCommand(300, 0);
-        cursor = new CommandBatchCursor<>(commandResult, 0, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertEquals(300, cursorFlatten().size());
@@ -415,7 +413,7 @@ void testLimitWithLargeDocuments() {
     @DisplayName("should respect batch size")
     void shouldRespectBatchSize() {
         BsonDocument commandResult = executeFindCommand(2);
-        cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertEquals(2, cursor.getBatchSize());
@@ -432,7 +430,7 @@ void shouldRespectBatchSize() {
     @DisplayName("should throw cursor not found exception")
     void shouldThrowCursorNotFoundException() {
         BsonDocument commandResult = executeFindCommand(2);
-        cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         ServerCursor serverCursor = cursor.getServerCursor();
@@ -441,7 +439,7 @@ void shouldThrowCursorNotFoundException() {
         localConnection.command(getNamespace().getDatabaseName(),
                 new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName()))
                         .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))),
-                NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource);
+                NO_OP_FIELD_NAME_VALIDATOR, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext());
         localConnection.release();
 
         cursor.next();
@@ -455,7 +453,7 @@ void shouldThrowCursorNotFoundException() {
     @DisplayName("should report available documents")
     void shouldReportAvailableDocuments() {
         BsonDocument commandResult = executeFindCommand(3);
-        cursor = new CommandBatchCursor<>(commandResult, 2, 0, DOCUMENT_DECODER,
+        cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
                                           null, connectionSource, connection);
 
         assertEquals(3, cursor.available());
@@ -533,7 +531,7 @@ private BsonDocument executeFindCommand(final BsonDocument filter, final int lim
         BsonDocument results = connection.command(getDatabaseName(), findCommand,
                 NO_OP_FIELD_NAME_VALIDATOR, readPreference,
                 CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH),
-                connectionSource);
+                connectionSource.getOperationContext());
 
         assertNotNull(results);
         return results;
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy
index 3d99ac477a4..a9f74ca50b3 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy
@@ -16,102 +16,53 @@
 
 package com.mongodb.internal.operation
 
-import util.spock.annotations.Slow
-import com.mongodb.MongoExecutionTimeoutException
+
 import com.mongodb.OperationFunctionalSpecification
 import org.bson.BsonBinary
 import org.bson.BsonDocument
 import org.bson.BsonInt32
 import org.bson.BsonString
 import org.bson.codecs.BsonDocumentCodec
-import spock.lang.IgnoreIf
-
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.executeAsync
-import static com.mongodb.ClusterFixture.getBinding
-import static com.mongodb.ClusterFixture.isSharded
+import util.spock.annotations.Slow
 
 class CommandOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should execute read command'() {
         given:
-        def commandOperation = new CommandReadOperation<BsonDocument>(getNamespace().databaseName,
-                                                                      new BsonDocument('count', new BsonString(getCollectionName())),
-                                                                      new BsonDocumentCodec())
+        def operation = new CommandReadOperation<BsonDocument>(getNamespace().databaseName,
+                new BsonDocument('count', new BsonString(getCollectionName())),
+                new BsonDocumentCodec())
         when:
-        def result = commandOperation.execute(getBinding())
+        def result = execute(operation, async)
 
         then:
         result.getNumber('n').intValue() == 0
-    }
-
 
-    def 'should execute read command asynchronously'() {
-        given:
-        def commandOperation = new CommandReadOperation<BsonDocument>(getNamespace().databaseName,
-                                                                      new BsonDocument('count', new BsonString(getCollectionName())),
-                                                                      new BsonDocumentCodec())
-        when:
-        def result = executeAsync(commandOperation)
 
-        then:
-        result.getNumber('n').intValue() == 0
+        where:
+        async << [true, false]
     }
 
+
     @Slow
     def 'should execute command larger than 16MB'() {
-        when:
-        def result = new CommandReadOperation<>(getNamespace().databaseName,
-                                                             new BsonDocument('findAndModify', new BsonString(getNamespace().fullName))
-                                                                     .append('query', new BsonDocument('_id', new BsonInt32(42)))
-                                                                     .append('update',
-                                                                             new BsonDocument('_id', new BsonInt32(42))
-                                                                                     .append('b', new BsonBinary(
-                                                                                     new byte[16 * 1024 * 1024 - 30]))),
-                                                             new BsonDocumentCodec())
-                .execute(getBinding())
-
-        then:
-        result.containsKey('value')
-    }
-
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
         given:
-        def commandOperation = new CommandReadOperation<BsonDocument>(getNamespace().databaseName,
-                                                                      new BsonDocument('count', new BsonString(getCollectionName()))
-                                                                              .append('maxTimeMS', new BsonInt32(1)),
-                                                                      new BsonDocumentCodec())
-        enableMaxTimeFailPoint()
+        def operation = new CommandReadOperation<>(getNamespace().databaseName,
+                new BsonDocument('findAndModify', new BsonString(getNamespace().fullName))
+                        .append('query', new BsonDocument('_id', new BsonInt32(42)))
+                        .append('update',
+                                new BsonDocument('_id', new BsonInt32(42))
+                                        .append('b', new BsonBinary(
+                                                new byte[16 * 1024 * 1024 - 30]))),
+                new BsonDocumentCodec())
 
         when:
-        commandOperation.execute(getBinding())
+        def result = execute(operation, async)
 
         then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-    }
-
-
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from executeAsync'() {
-        given:
-        def commandOperation = new CommandReadOperation<BsonDocument>(getNamespace().databaseName,
-                                                                      new BsonDocument('count', new BsonString(getCollectionName()))
-                                                                              .append('maxTimeMS', new BsonInt32(1)),
-                                                                      new BsonDocumentCodec())
-        enableMaxTimeFailPoint()
-
-        when:
-        executeAsync(commandOperation)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
+        result.containsKey('value')
 
-        cleanup:
-        disableMaxTimeFailPoint()
+        where:
+        async << [true, false]
     }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy
index c308e115ca8..26d7d11bc6e 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy
@@ -17,7 +17,6 @@
 package com.mongodb.internal.operation
 
 import com.mongodb.MongoException
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNamespace
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ReadConcern
@@ -45,16 +44,13 @@ import org.bson.Document
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.getBinding
 import static com.mongodb.ClusterFixture.serverVersionAtLeast
 import static com.mongodb.connection.ServerType.STANDALONE
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand
 import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION
-import static java.util.concurrent.TimeUnit.MILLISECONDS
-import static java.util.concurrent.TimeUnit.SECONDS
 
 class CountDocumentsOperationSpecification extends OperationFunctionalSpecification {
 
@@ -77,20 +73,18 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
 
         then:
         operation.getFilter() == null
-        operation.getMaxTime(MILLISECONDS) == 0
         operation.getHint() == null
         operation.getLimit() == 0
         operation.getSkip() == 0
     }
 
-    def 'should set optional values correctly'(){
+    def 'should set optional values correctly'() {
         given:
         def filter = new BsonDocument('filter', new BsonInt32(1))
         def hint = new BsonString('hint')
 
         when:
         CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace())
-                .maxTime(10, MILLISECONDS)
                 .filter(filter)
                 .hint(hint)
                 .limit(20)
@@ -98,7 +92,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
 
         then:
         operation.getFilter() == filter
-        operation.getMaxTime(MILLISECONDS) == 10
         operation.getHint() == hint
         operation.getLimit() == 20
         operation.getSkip() == 30
@@ -135,24 +128,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
         async << [true, false]
     }
 
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        def operation = new CountDocumentsOperation(getNamespace()).maxTime(1, SECONDS)
-        enableMaxTimeFailPoint()
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        async << [true, false]
-    }
-
     def 'should use limit with the count'() {
         when:
         def operation = new CountDocumentsOperation(getNamespace()).limit(1)
@@ -179,7 +154,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
     def 'should use hint with the count'() {
         given:
         def indexDefinition = new BsonDocument('y', new BsonInt32(1))
-        new CreateIndexesOperation(getNamespace(), [new IndexRequest(indexDefinition).sparse(true)])
+        new CreateIndexesOperation(getNamespace(), [new IndexRequest(indexDefinition).sparse(true)], null)
                 .execute(getBinding())
         def operation = new CountDocumentsOperation(getNamespace()).hint(indexDefinition)
 
@@ -243,11 +218,11 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
         testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult)
 
         when:
-        operation.filter(filter)
+        operation = new CountDocumentsOperation(helper.namespace)
+                .filter(filter)
                 .limit(20)
                 .skip(30)
                 .hint(hint)
-                .maxTime(10, MILLISECONDS)
                 .collation(defaultCollation)
 
         expectedCommand = expectedCommand
@@ -255,7 +230,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
                                                    new BsonDocument('$skip', new BsonInt64(30)),
                                                    new BsonDocument('$limit', new BsonInt64(20)),
                                                    pipeline.last()]))
-                .append('maxTimeMS', new BsonInt32(10))
                 .append('collation', defaultCollation.asDocument())
                 .append('hint', hint)
 
@@ -270,7 +244,8 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
     def 'should support collation'() {
         given:
         getCollectionHelper().insertDocuments(BsonDocument.parse('{str: "foo"}'))
-        def operation = new CountDocumentsOperation(namespace).filter(BsonDocument.parse('{str: "FOO"}'))
+        def operation = new CountDocumentsOperation(namespace)
+                .filter(BsonDocument.parse('{str: "FOO"}'))
                 .collation(caseInsensitiveCollation)
 
         when:
@@ -285,16 +260,16 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
 
     def 'should add read concern to command'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(ReadBinding)
         def source = Stub(ConnectionSource)
         def connection = Mock(Connection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.readConnectionSource >> source
-        binding.sessionContext >> sessionContext
         source.connection >> connection
         source.retain() >> source
-        source.getServerApi() >> null
+        source.operationContext >> operationContext
         def pipeline = new BsonArray([BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')])
         def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName()))
                 .append('pipeline', pipeline)
@@ -309,8 +284,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.command(_, commandDocument, _, _, _, binding) >>
-                helper.cursorResult
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >> helper.cursorResult
         1 * connection.release()
 
         where:
@@ -326,15 +300,16 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat
 
     def 'should add read concern to command asynchronously'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(AsyncReadBinding)
         def source = Stub(AsyncConnectionSource)
         def connection = Mock(AsyncConnection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
-        binding.sessionContext >> sessionContext
         source.getConnection(_) >> { it[0].onResult(connection, null) }
         source.retain() >> source
+        source.operationContext >> operationContext
         def pipeline = new BsonArray([BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')])
         def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName()))
                 .append('pipeline', pipeline)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy
index c327721bbd5..cddb1925b64 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy
@@ -38,7 +38,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
 
     def 'should have the correct defaults'() {
         when:
-        CreateCollectionOperation operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        CreateCollectionOperation operation = createOperation()
 
         then:
         !operation.isCapped()
@@ -61,7 +61,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
         def validator = BsonDocument.parse('{ level: { $gte : 10 }}')
 
         when:
-        CreateCollectionOperation operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        CreateCollectionOperation operation = createOperation()
             .autoIndex(false)
             .capped(true)
             .sizeInBytes(1000)
@@ -91,7 +91,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
         assert !collectionNameExists(getCollectionName())
 
         when:
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        def operation = createOperation()
         execute(operation, async)
 
         then:
@@ -108,16 +108,16 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
         if (serverVersionLessThan(4, 2)) {
             storageEngineOptions.append('mmapv1', new BsonDocument())
         }
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        def operation = createOperation()
                 .storageEngineOptions(storageEngineOptions)
 
         when:
         execute(operation, async)
 
         then:
-        new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find {
-            it -> it.getString('name').value == getCollectionName()
-        }.getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions
+        new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec())
+                .execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() }
+                .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions
 
         where:
         async << [true, false]
@@ -130,17 +130,16 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
         if (serverVersionLessThan(4, 2)) {
             storageEngineOptions.append('mmapv1', new BsonDocument())
         }
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        def operation = createOperation()
                 .storageEngineOptions(storageEngineOptions)
 
         when:
         execute(operation, async)
 
         then:
-        new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find {
-            it -> it.getString('name').value == getCollectionName()
-        }.getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions
-
+        new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec())
+                .execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() }
+                .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions
         where:
         async << [true, false]
     }
@@ -148,7 +147,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
     def 'should create capped collection'() {
         given:
         assert !collectionNameExists(getCollectionName())
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        def operation = createOperation()
                 .capped(true)
                 .maxDocuments(100)
                 .sizeInBytes(40 * 1024)
@@ -177,7 +176,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
     def 'should create collection in respect to the autoIndex option'() {
         given:
         assert !collectionNameExists(getCollectionName())
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        def operation = createOperation()
                 .autoIndex(autoIndex)
 
         when:
@@ -199,7 +198,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
         given:
         assert !collectionNameExists(getCollectionName())
         def indexOptionDefaults = BsonDocument.parse('{ storageEngine: { wiredTiger : {} }}')
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        def operation = createOperation()
                 .indexOptionDefaults(indexOptionDefaults)
 
         when:
@@ -218,7 +217,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
         given:
         assert !collectionNameExists(getCollectionName())
         def validator = BsonDocument.parse('{ level: { $gte : 10 }}')
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName())
+        def operation = createOperation()
                 .validator(validator)
                 .validationLevel(ValidationLevel.MODERATE)
                 .validationAction(ValidationAction.ERROR)
@@ -247,7 +246,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
     def 'should throw on write concern error'() {
         given:
         assert !collectionNameExists(getCollectionName())
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName(), new WriteConcern(5))
+        def operation = createOperation(new WriteConcern(5))
 
         when:
         execute(operation, async)
@@ -264,7 +263,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
     @IgnoreIf({ serverVersionLessThan(3, 4) })
     def 'should be able to create a collection with a collation'() {
         given:
-        def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()).collation(defaultCollation)
+        def operation = createOperation().collation(defaultCollation)
 
         when:
         execute(operation, async)
@@ -303,4 +302,12 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific
             cursor.close()
         }
     }
+
+    def createOperation() {
+        createOperation(null)
+    }
+
+    def createOperation(WriteConcern writeConcern) {
+        new CreateCollectionOperation(getDatabaseName(), getCollectionName(), writeConcern)
+    }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy
index 3f0f1938bb6..389f4388b54 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy
@@ -20,7 +20,6 @@ import com.mongodb.CreateIndexCommitQuorum
 import com.mongodb.DuplicateKeyException
 import com.mongodb.MongoClientException
 import com.mongodb.MongoCommandException
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoWriteConcernException
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.WriteConcern
@@ -35,11 +34,8 @@ import org.bson.Document
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
 import static com.mongodb.ClusterFixture.getBinding
 import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet
-import static com.mongodb.ClusterFixture.isSharded
 import static com.mongodb.ClusterFixture.serverVersionAtLeast
 import static com.mongodb.ClusterFixture.serverVersionLessThan
 import static java.util.concurrent.TimeUnit.SECONDS
@@ -53,14 +49,13 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should get index names'() {
         when:
-        def createIndexOperation = new CreateIndexesOperation(getNamespace(),
-                                                              [new IndexRequest(new BsonDocument('field1', new BsonInt32(1))),
-                                                               new IndexRequest(new BsonDocument('field2', new BsonInt32(-1))),
-                                                               new IndexRequest(new BsonDocument('field3', new BsonInt32(1))
-                                                                                        .append('field4', new BsonInt32(-1))),
-                                                               new IndexRequest(new BsonDocument('field5', new BsonInt32(-1)))
-                                                                       .name('customName')
-                                                              ])
+        def createIndexOperation = createOperation([new IndexRequest(new BsonDocument('field1', new BsonInt32(1))),
+                                                    new IndexRequest(new BsonDocument('field2', new BsonInt32(-1))),
+                                                    new IndexRequest(new BsonDocument('field3', new BsonInt32(1))
+                                                            .append('field4', new BsonInt32(-1))),
+                                                    new IndexRequest(new BsonDocument('field5', new BsonInt32(-1)))
+                                                            .name('customName')
+        ])
         then:
         createIndexOperation.indexNames == ['field1_1', 'field2_-1', 'field3_1_field4_-1', 'customName']
     }
@@ -68,7 +63,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     def 'should be able to create a single index'() {
         given:
         def keys = new BsonDocument('field', new BsonInt32(1))
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)])
+        def operation = createOperation([new IndexRequest(keys)])
 
         when:
         execute(operation, async)
@@ -80,32 +75,11 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
         async << [true, false]
     }
 
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        def keys = new BsonDocument('field', new BsonInt32(1))
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]).maxTime(30, SECONDS)
-
-        enableMaxTimeFailPoint()
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        async << [true, false]
-    }
-
     @IgnoreIf({ serverVersionAtLeast(4, 4) })
     def 'should throw exception if commit quorum is set where server < 4.4'() {
         given:
         def keys = new BsonDocument('field', new BsonInt32(1))
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)])
+        def operation = createOperation([new IndexRequest(keys)])
                 .commitQuorum(CreateIndexCommitQuorum.MAJORITY)
 
         when:
@@ -124,7 +98,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
         def keys = new BsonDocument('field', new BsonInt32(1))
 
         when:
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)])
+        def operation = createOperation([new IndexRequest(keys)])
                 .commitQuorum(quorum)
 
         then:
@@ -144,7 +118,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     def 'should be able to create a single index with a BsonInt64'() {
         given:
         def keys = new BsonDocument('field', new BsonInt64(1))
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)])
+        def operation = createOperation([new IndexRequest(keys)])
 
         when:
         execute(operation, async)
@@ -160,8 +134,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
         given:
         def keysForFirstIndex = new BsonDocument('field', new BsonInt32(1))
         def keysForSecondIndex = new BsonDocument('field2', new BsonInt32(1))
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keysForFirstIndex),
-                                                                    new IndexRequest(keysForSecondIndex)])
+        def operation = createOperation([new IndexRequest(keysForFirstIndex),
+                                         new IndexRequest(keysForSecondIndex)])
 
         when:
         execute(operation, async)
@@ -176,7 +150,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     def 'should be able to create a single index on a nested field'() {
         given:
         def keys = new BsonDocument('x.y', new BsonInt32(1))
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)])
+        def operation = createOperation([new IndexRequest(keys)])
 
         when:
         execute(operation, async)
@@ -191,8 +165,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     def 'should be able to handle duplicate key errors when indexing'() {
         given:
         getCollectionHelper().insertDocuments(new DocumentCodec(), x1, x1)
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true)])
+        def operation = createOperation([new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true)])
 
         when:
         execute(operation, async)
@@ -208,8 +181,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     def 'should drop duplicates'() {
         given:
         getCollectionHelper().insertDocuments(new DocumentCodec(), x1, x1)
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true).dropDups(true)])
+        def operation = createOperation([new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true).dropDups(true)])
 
         when:
         execute(operation, async)
@@ -223,7 +195,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should throw when trying to build an invalid index'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument())])
+        def operation = createOperation([new IndexRequest(new BsonDocument())])
 
         when:
         execute(operation, async)
@@ -237,8 +209,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a unique index'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
 
         when:
         execute(operation, async)
@@ -248,8 +219,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         getCollectionHelper().drop(getNamespace())
-        operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).unique(true)])
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).unique(true)])
         execute(operation, async)
 
         then:
@@ -261,7 +231,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a sparse index'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
 
         when:
         execute(operation, async)
@@ -271,8 +241,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         getCollectionHelper().drop(getNamespace())
-        operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).sparse(true)])
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).sparse(true)])
         execute(operation, async)
 
         then:
@@ -284,8 +253,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a TTL indexes'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
 
         when:
         execute(operation, async)
@@ -295,8 +263,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         getCollectionHelper().drop(getNamespace())
-        operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).expireAfter(100, SECONDS)])
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).expireAfter(100, SECONDS)])
         execute(operation, async)
 
         then:
@@ -309,8 +276,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a 2d indexes'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonString('2d')))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d')))])
 
         when:
         execute(operation, async)
@@ -320,8 +286,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         getCollectionHelper().drop(getNamespace())
-        operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonString('2d'))).bits(2).min(1.0).max(2.0)])
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d'))).bits(2).min(1.0).max(2.0)])
         execute(operation, async)
 
         then:
@@ -336,8 +301,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a 2dSphereIndex'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonString('2dsphere')))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere')))])
 
         when:
         execute(operation, async)
@@ -351,8 +315,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a 2dSphereIndex with version 1'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonString('2dsphere'))).sphereVersion(1)])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere'))).sphereVersion(1)])
 
         when:
         execute(operation, async)
@@ -367,11 +330,10 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a textIndex'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonString('text')))
-                         .defaultLanguage('es')
-                         .languageOverride('language')
-                         .weights(new BsonDocument('field', new BsonInt32(100)))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text')))
+                                                 .defaultLanguage('es')
+                                                 .languageOverride('language')
+                                                 .weights(new BsonDocument('field', new BsonInt32(100)))])
 
         when:
         execute(operation, async)
@@ -388,8 +350,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a textIndexVersion'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonString('text')))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text')))])
 
         when:
         execute(operation, async)
@@ -403,8 +364,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
     def 'should be able to create a textIndexVersion with version 1'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonString('text'))).textVersion(1)])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text'))).textVersion(1)])
 
         when:
         execute(operation, async)
@@ -420,9 +380,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     def 'should pass through storage engine options'() {
         given:
         def storageEngineOptions = new Document('wiredTiger', new Document('configString', 'block_compressor=zlib'))
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('a', new BsonInt32(1)))
-                         .storageEngine(new BsonDocumentWrapper(storageEngineOptions, new DocumentCodec()))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1)))
+                                                 .storageEngine(new BsonDocumentWrapper(storageEngineOptions, new DocumentCodec()))])
 
         when:
         execute(operation, async)
@@ -438,9 +397,9 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     def 'should be able to create a partially filtered index'() {
         given:
         def partialFilterExpression = new Document('a', new Document('$gte', 10))
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))
-                         .partialFilterExpression(new BsonDocumentWrapper(partialFilterExpression, new DocumentCodec()))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))
+                                                 .partialFilterExpression(new BsonDocumentWrapper(partialFilterExpression,
+                                                         new DocumentCodec()))])
 
         when:
         execute(operation, async)
@@ -473,8 +432,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     @IgnoreIf({ serverVersionLessThan(3, 4) })
     def 'should be able to create an index with collation'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('a', new BsonInt32(1))).collation(defaultCollation)])
+        def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1))).collation(defaultCollation)])
 
         when:
         execute(operation, async)
@@ -491,9 +449,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     @IgnoreIf({ serverVersionLessThan(4, 2) })
     def 'should be able to create wildcard indexes'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('$**', new BsonInt32(1))),
-                 new IndexRequest(new BsonDocument('tags.$**', new BsonInt32(1)))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1))),
+                                         new IndexRequest(new BsonDocument('tags.$**', new BsonInt32(1)))])
 
         when:
         execute(operation, async)
@@ -509,9 +466,9 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     @IgnoreIf({ serverVersionLessThan(4, 2) })
     def 'should be able to create wildcard index with projection'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('$**', new BsonInt32(1)))
-                        .wildcardProjection(new BsonDocument('a', BsonBoolean.TRUE).append('_id', BsonBoolean.FALSE))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1)))
+                                                 .wildcardProjection(new BsonDocument('a', BsonBoolean.TRUE).append('_id',
+                                                         BsonBoolean.FALSE))])
 
         when:
         execute(operation, async)
@@ -527,7 +484,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
     @IgnoreIf({ serverVersionLessThan(4, 4) })
     def 'should be able to set hidden index'() {
         given:
-        def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
 
         when:
         execute(operation, async)
@@ -537,8 +494,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         getCollectionHelper().drop(getNamespace())
-        operation = new CreateIndexesOperation(getNamespace(),
-                [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).hidden(true)])
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).hidden(true)])
         execute(operation, async)
 
         then:
@@ -571,4 +527,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati
         getUserCreatedIndexes()*.get(keyname).findAll { it != null }
     }
 
+    def createOperation(final List<IndexRequest> requests) {
+        new CreateIndexesOperation(getNamespace(), requests, null)
+    }
+
 }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy
index 87fc13aaa31..52ad4334493 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy
@@ -51,7 +51,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification
         getCollectionHelper().insertDocuments([trueXDocument, falseXDocument])
 
         def pipeline = [new BsonDocument('$match', trueXDocument)]
-        def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, pipeline, WriteConcern.ACKNOWLEDGED)
+        def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, pipeline,
+                WriteConcern.ACKNOWLEDGED)
 
         when:
         execute(operation, async)
@@ -79,7 +80,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification
         assert !collectionNameExists(viewOn)
         assert !collectionNameExists(viewName)
 
-        def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, [], WriteConcern.ACKNOWLEDGED)
+        def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, [],
+                WriteConcern.ACKNOWLEDGED)
                 .collation(defaultCollation)
 
         when:
@@ -120,7 +122,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification
         def viewNamespace = new MongoNamespace(getDatabaseName(), viewName)
         assert !collectionNameExists(viewName)
 
-        def operation = new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], new WriteConcern(5))
+        def operation = new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [],
+                new WriteConcern(5))
 
         when:
         execute(operation, async)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy
index 40f707ccf1b..587e05e1d0c 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy
@@ -16,7 +16,6 @@
 
 package com.mongodb.internal.operation
 
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNamespace
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ReadConcern
@@ -38,7 +37,6 @@ import com.mongodb.internal.session.SessionContext
 import org.bson.BsonBoolean
 import org.bson.BsonDocument
 import org.bson.BsonInt32
-import org.bson.BsonInt64
 import org.bson.BsonInvalidOperationException
 import org.bson.BsonString
 import org.bson.BsonTimestamp
@@ -53,15 +51,12 @@ import org.bson.codecs.ValueCodecProvider
 import org.bson.types.ObjectId
 import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.serverVersionLessThan
 import static com.mongodb.connection.ServerType.STANDALONE
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand
 import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION
-import static java.util.concurrent.TimeUnit.MILLISECONDS
-import static java.util.concurrent.TimeUnit.SECONDS
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders
 
 class DistinctOperationSpecification extends OperationFunctionalSpecification {
@@ -80,7 +75,6 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
 
         then:
         operation.getFilter() == null
-        operation.getMaxTime(MILLISECONDS) == 0
         operation.getCollation() == null
     }
 
@@ -90,13 +84,11 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
 
         when:
         DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder)
-                .maxTime(10, MILLISECONDS)
                 .filter(filter)
                 .collation(defaultCollation)
 
         then:
         operation.getFilter() == filter
-        operation.getMaxTime(MILLISECONDS) == 10
         operation.getCollation() == defaultCollation
     }
 
@@ -186,24 +178,6 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
         async << [true, false]
     }
 
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        def operation = new DistinctOperation(getNamespace(), 'name', stringDecoder).maxTime(1, SECONDS)
-        enableMaxTimeFailPoint()
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        async << [true, false]
-    }
-
     def 'should use the ReadBindings readPreference to set secondaryOk'() {
         when:
         def operation = new DistinctOperation(helper.namespace, 'name', helper.decoder)
@@ -219,13 +193,11 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
         when:
         def operation = new DistinctOperation(helper.namespace, 'name', new BsonDocumentCodec())
                 .filter(new BsonDocument('a', BsonBoolean.TRUE))
-                .maxTime(10, MILLISECONDS)
                 .collation(defaultCollation)
 
         def expectedCommand = new BsonDocument('distinct', new BsonString(helper.namespace.getCollectionName()))
                 .append('key', new BsonString('name'))
                 .append('query', operation.getFilter())
-                .append('maxTimeMS', new BsonInt64(operation.getMaxTime(MILLISECONDS)))
                 .append('collation', defaultCollation.asDocument())
 
         then:
@@ -240,7 +212,8 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
         given:
         def document = Document.parse('{str: "foo"}')
         getCollectionHelper().insertDocuments(document)
-        def operation = new DistinctOperation(namespace, 'str', stringDecoder).filter(BsonDocument.parse('{str: "FOO"}}'))
+        def operation = new DistinctOperation(namespace, 'str', stringDecoder)
+                .filter(BsonDocument.parse('{str: "FOO"}'))
                 .collation(caseInsensitiveCollation)
 
         when:
@@ -255,16 +228,16 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should add read concern to command'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(ReadBinding)
         def source = Stub(ConnectionSource)
         def connection = Mock(Connection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.readConnectionSource >> source
-        binding.sessionContext >> sessionContext
         source.connection >> connection
         source.retain() >> source
-        source.getServerApi() >> null
+        source.operationContext >> operationContext
         def commandDocument = new BsonDocument('distinct', new BsonString(getCollectionName()))
                 .append('key', new BsonString('str'))
         appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument)
@@ -277,7 +250,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.command(_, commandDocument, _, _, _, _) >>
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
                 new BsonDocument('values', new BsonArrayWrapper([]))
         1 * connection.release()
 
@@ -294,14 +267,14 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should add read concern to command asynchronously'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(AsyncReadBinding)
         def source = Stub(AsyncConnectionSource)
         def connection = Mock(AsyncConnection)
-        binding.serverApi >> null
         binding.readPreference >> ReadPreference.primary()
         binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
-        binding.sessionContext >> sessionContext
-        source.serverApi >> null
+        binding.operationContext >> operationContext
+        source.operationContext >> operationContext
         source.getConnection(_) >> { it[0].onResult(connection, null) }
         source.retain() >> source
         def commandDocument = new BsonDocument('distinct', new BsonString(getCollectionName()))
@@ -316,7 +289,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                  6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.commandAsync(_, commandDocument, _, _, _, *_) >> {
+        1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, *_) >> {
             it.last().onResult(new BsonDocument('values', new BsonArrayWrapper([])), null)
         }
         1 * connection.release()
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy
index 0c293ed58b0..67124fecf30 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy
@@ -37,7 +37,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat
         assert collectionNameExists(getCollectionName())
 
         when:
-        new DropCollectionOperation(getNamespace()).execute(getBinding())
+        new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED).execute(getBinding())
 
         then:
         !collectionNameExists(getCollectionName())
@@ -50,7 +50,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat
         assert collectionNameExists(getCollectionName())
 
         when:
-        executeAsync(new DropCollectionOperation(getNamespace()))
+        executeAsync(new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED))
 
         then:
         !collectionNameExists(getCollectionName())
@@ -61,7 +61,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat
         def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection')
 
         when:
-        new DropCollectionOperation(namespace).execute(getBinding())
+        new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED).execute(getBinding())
 
         then:
         !collectionNameExists('nonExistingCollection')
@@ -73,7 +73,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat
         def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection')
 
         when:
-        executeAsync(new DropCollectionOperation(namespace))
+        executeAsync(new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED))
 
         then:
         !collectionNameExists('nonExistingCollection')
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy
index 1069dbfe2a6..61648c1daec 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy
@@ -42,47 +42,28 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio
         assert databaseNameExists(databaseName)
 
         when:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        execute(new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED), async)
 
         then:
         !databaseNameExists(databaseName)
-    }
-
-
-    @IgnoreIf({ isSharded() })
-    def 'should drop a database that exists asynchronously'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection'))
-        assert databaseNameExists(databaseName)
 
-        when:
-        executeAsync(new DropDatabaseOperation(databaseName))
-
-        then:
-        !databaseNameExists(databaseName)
+        where:
+        async << [true, false]
     }
 
+
     def 'should not error when dropping a collection that does not exist'() {
         given:
         def dbName = 'nonExistingDatabase'
 
         when:
-        new DropDatabaseOperation(dbName).execute(getBinding())
+        execute(new DropDatabaseOperation(dbName, WriteConcern.ACKNOWLEDGED), async)
 
         then:
         !databaseNameExists(dbName)
-    }
-
-
-    def 'should not error when dropping a collection that does not exist asynchronously'() {
-        given:
-        def dbName = 'nonExistingDatabase'
 
-        when:
-        executeAsync(new DropDatabaseOperation(dbName))
-
-        then:
-        !databaseNameExists(dbName)
+        where:
+        async << [true, false]
     }
 
     @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() })
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy
index 029b2c8544b..a051231af7e 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy
@@ -17,7 +17,6 @@
 package com.mongodb.internal.operation
 
 import com.mongodb.MongoException
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoWriteConcernException
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.WriteConcern
@@ -30,19 +29,15 @@ import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 import spock.lang.Unroll
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
 import static com.mongodb.ClusterFixture.getBinding
 import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet
-import static com.mongodb.ClusterFixture.isSharded
 import static com.mongodb.ClusterFixture.serverVersionLessThan
-import static java.util.concurrent.TimeUnit.SECONDS
 
 class DropIndexOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should not error when dropping non-existent index on non-existent collection'() {
         when:
-        execute(new DropIndexOperation(getNamespace(), 'made_up_index_1'), async)
+        execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async)
 
         then:
         getIndexes().size() == 0
@@ -56,7 +51,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification {
         getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection'))
 
         when:
-        execute(new DropIndexOperation(getNamespace(), 'made_up_index_1'), async)
+        execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async)
 
         then:
         thrown(MongoException)
@@ -70,7 +65,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification {
         collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1)))
 
         when:
-        execute(new DropIndexOperation(getNamespace(), 'theField_1'), async)
+        execute(new DropIndexOperation(getNamespace(), 'theField_1', null), async)
         List<Document> indexes = getIndexes()
 
         then:
@@ -87,7 +82,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification {
         collectionHelper.createIndex(keys)
 
         when:
-        execute(new DropIndexOperation(getNamespace(), keys), async)
+        execute(new DropIndexOperation(getNamespace(), keys, null), async)
         List<Document> indexes = getIndexes()
 
         then:
@@ -105,35 +100,14 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification {
         ].combinations()
     }
 
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        def keys = new BsonDocument('theField', new BsonInt32(1))
-        collectionHelper.createIndex(keys)
-        def operation = new DropIndexOperation(getNamespace(), keys).maxTime(30, SECONDS)
-
-        enableMaxTimeFailPoint()
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        async << [true, false]
-    }
-
     def 'should drop existing index by key when using BsonInt64'() {
         given:
         def keys = new BsonDocument('theField', new BsonInt32(1))
         collectionHelper.createIndex(keys)
 
         when:
-        execute(new DropIndexOperation(getNamespace(), new BsonDocument('theField', new BsonInt64(1))), async)
+        execute(new DropIndexOperation(getNamespace(), new BsonDocument('theField', new BsonInt64(1)), null),
+                async)
         List<Document> indexes = getIndexes()
 
         then:
@@ -150,7 +124,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification {
         collectionHelper.createIndex(new BsonDocument('theOtherField', new BsonInt32(1)))
 
         when:
-        execute(new DropIndexOperation(getNamespace(), '*'), async)
+        execute(new DropIndexOperation(getNamespace(), '*', null), async)
         List<Document> indexes = getIndexes()
 
         then:
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy
index aad74b1881f..ccc9614d1fb 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy
@@ -34,8 +34,6 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import java.util.concurrent.TimeUnit
-
 import static com.mongodb.ClusterFixture.configureFailPoint
 import static com.mongodb.ClusterFixture.disableFailPoint
 import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint
@@ -63,7 +61,6 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati
         operation.getFilter() == null
         operation.getSort() == null
         operation.getProjection() == null
-        operation.getMaxTime(TimeUnit.MILLISECONDS) == 0
         operation.getCollation() == null
     }
 
@@ -78,14 +75,12 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati
             .filter(filter)
             .sort(sort)
             .projection(projection)
-            .maxTime(10, TimeUnit.MILLISECONDS)
             .collation(defaultCollation)
 
         then:
         operation.getFilter() == filter
         operation.getSort() == sort
         operation.getProjection() == projection
-        operation.getMaxTime(TimeUnit.MILLISECONDS) == 10
         operation.getCollation() == defaultCollation
     }
 
@@ -118,8 +113,8 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati
         getWorkerCollectionHelper().insertDocuments(new WorkerCodec(), pete, sam)
 
         when:
-        FindAndDeleteOperation<Worker> operation = new FindAndDeleteOperation<Worker>(getNamespace(), ACKNOWLEDGED, false,
-                workerCodec).filter(new BsonDocument('name', new BsonString('Pete')))
+        FindAndDeleteOperation<Worker> operation = new FindAndDeleteOperation<Worker>(getNamespace(),
+                ACKNOWLEDGED, false, workerCodec).filter(new BsonDocument('name', new BsonString('Pete')))
         Worker returnedDocument = execute(operation, async)
 
         then:
@@ -220,12 +215,10 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati
         operation.filter(filter)
                 .sort(sort)
                 .projection(projection)
-                .maxTime(10, TimeUnit.MILLISECONDS)
 
         expectedCommand.append('query', filter)
                 .append('sort', sort)
                 .append('fields', projection)
-                .append('maxTimeMS', new BsonInt64(10))
 
         operation.collation(defaultCollation)
         expectedCommand.append('collation', defaultCollation.asDocument())
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy
index a4a0a48bd60..4c334fa0ea0 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy
@@ -40,8 +40,6 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import java.util.concurrent.TimeUnit
-
 import static com.mongodb.ClusterFixture.configureFailPoint
 import static com.mongodb.ClusterFixture.disableFailPoint
 import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint
@@ -62,7 +60,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
     def 'should have the correct defaults and passed values'() {
         when:
         def replacement = new BsonDocument('replace', new BsonInt32(1))
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec,
+                replacement)
 
         then:
         operation.getNamespace() == getNamespace()
@@ -72,7 +71,6 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         operation.getFilter() == null
         operation.getSort() == null
         operation.getProjection() == null
-        operation.getMaxTime(TimeUnit.SECONDS) == 0
         operation.getBypassDocumentValidation() == null
         operation.getCollation() == null
     }
@@ -86,7 +84,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         when:
         def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec,
                 new BsonDocument('replace', new BsonInt32(1))).filter(filter).sort(sort).projection(projection)
-                .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true).returnOriginal(false)
+                .bypassDocumentValidation(true).upsert(true).returnOriginal(false)
                 .collation(defaultCollation)
 
         then:
@@ -94,7 +92,6 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         operation.getSort() == sort
         operation.getProjection() == projection
         operation.upsert == true
-        operation.getMaxTime(TimeUnit.SECONDS) == 1
         operation.getBypassDocumentValidation()
         !operation.isReturnOriginal()
         operation.getCollation() == defaultCollation
@@ -110,7 +107,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         helper.insertDocuments(new DocumentCodec(), pete, sam)
 
         when:
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, jordan)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
         Document returnedDocument = execute(operation, async)
 
@@ -144,8 +142,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         helper.insertDocuments(new WorkerCodec(), pete, sam)
 
         when:
-        def operation = new FindAndReplaceOperation<Worker>(getNamespace(), ACKNOWLEDGED, false, workerCodec,
-                replacement).filter(new BsonDocument('name', new BsonString('Pete')))
+        def operation = new FindAndReplaceOperation<Worker>(getNamespace(), ACKNOWLEDGED, false,
+                workerCodec, replacement).filter(new BsonDocument('name', new BsonString('Pete')))
         Worker returnedDocument = execute(operation, async)
 
         then:
@@ -154,7 +152,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
 
         when:
         replacement = new BsonDocumentWrapper<Worker>(pete, workerCodec)
-        operation = new FindAndReplaceOperation<Worker>(getNamespace(), ACKNOWLEDGED, false, workerCodec, replacement)
+        operation = new FindAndReplaceOperation<Worker>(getNamespace(), ACKNOWLEDGED, false, workerCodec,
+                replacement)
                 .filter(new BsonDocument('name', new BsonString('Jordan')))
                 .returnOriginal(false)
         returnedDocument = execute(operation, async)
@@ -169,7 +168,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
     def 'should return null if query fails to match'() {
         when:
         BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}')
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, jordan)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
         Document returnedDocument = execute(operation, async)
 
@@ -183,7 +183,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
     def 'should throw an exception if replacement contains update operators'() {
         given:
         def replacement = new BsonDocumentWrapper<Document>(['$inc': 1] as Document, documentCodec)
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, replacement)
 
         when:
         execute(operation, async)
@@ -207,7 +208,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
 
         when:
         def replacement = new BsonDocument('level', new BsonInt32(9))
-        def operation = new FindAndReplaceOperation<Document>(namespace, ACKNOWLEDGED, false, documentCodec, replacement)
+        def operation = new FindAndReplaceOperation<Document>(namespace, ACKNOWLEDGED, false,
+                documentCodec, replacement)
         execute(operation, async)
 
         then:
@@ -245,8 +247,9 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}')
 
         when:
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), new WriteConcern(5, 1),
-                false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Pete')))
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(),
+                new WriteConcern(5, 1), false, documentCodec, jordan)
+                .filter(new BsonDocument('name', new BsonString('Pete')))
         execute(operation, async)
 
         then:
@@ -341,12 +344,10 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
                 .sort(sort)
                 .projection(projection)
                 .bypassDocumentValidation(true)
-                .maxTime(10, TimeUnit.MILLISECONDS)
 
         expectedCommand.append('query', filter)
                 .append('sort', sort)
                 .append('fields', projection)
-                .append('maxTimeMS', new BsonInt64(10))
 
         operation.collation(defaultCollation)
         expectedCommand.append('collation', defaultCollation.asDocument())
@@ -376,7 +377,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         helper.insertDocuments(new DocumentCodec(), pete, sam)
 
         when:
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, true, documentCodec, jordan)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, true,
+                documentCodec, jordan)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
 
         enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}'))
@@ -398,7 +400,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         when:
         def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec()))
         def replacement = BsonDocument.parse('{ replacement: 1}')
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, true,
+                documentCodec, replacement)
         def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName()))
                 .append('update', replacement)
                 .append('txnNumber', new BsonInt64(0))
@@ -414,7 +417,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
     def 'should throw original error when retrying and failing'() {
         given:
         def replacement = BsonDocument.parse('{ replacement: 1}')
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, true,
+                documentCodec, replacement)
         def originalException = new MongoSocketException('Some failure', new ServerAddress())
 
         when:
@@ -443,7 +447,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat
         def document = Document.parse('{_id: 1, str: "foo"}')
         getCollectionHelper().insertDocuments(document)
         def replacement = BsonDocument.parse('{str: "bar"}')
-        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement)
+        def operation = new FindAndReplaceOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, replacement)
                 .filter(BsonDocument.parse('{str: "FOO"}'))
                 .collation(caseInsensitiveCollation)
 
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy
index d6625cd4d88..821eacbee6e 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy
@@ -41,8 +41,6 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import java.util.concurrent.TimeUnit
-
 import static com.mongodb.ClusterFixture.configureFailPoint
 import static com.mongodb.ClusterFixture.disableFailPoint
 import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint
@@ -64,7 +62,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
     def 'should have the correct defaults and passed values'() {
         when:
         def update = new BsonDocument('update', new BsonInt32(1))
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, update)
 
         then:
         operation.getNamespace() == getNamespace()
@@ -74,7 +73,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         operation.getFilter() == null
         operation.getSort() == null
         operation.getProjection() == null
-        operation.getMaxTime(TimeUnit.SECONDS) == 0
         operation.getBypassDocumentValidation() == null
         operation.getCollation() == null
     }
@@ -93,7 +91,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         operation.getFilter() == null
         operation.getSort() == null
         operation.getProjection() == null
-        operation.getMaxTime(TimeUnit.SECONDS) == 0
         operation.getBypassDocumentValidation() == null
         operation.getCollation() == null
     }
@@ -105,9 +102,12 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         def projection = new BsonDocument('projection', new BsonInt32(1))
 
         when:
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec,
-                new BsonDocument('update', new BsonInt32(1))).filter(filter).sort(sort).projection(projection)
-                .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(),
+                ACKNOWLEDGED, false, documentCodec, new BsonDocument('update', new BsonInt32(1)))
+                .filter(filter)
+                .sort(sort)
+                .projection(projection)
+                .bypassDocumentValidation(true).upsert(true)
                 .returnOriginal(false)
                 .collation(defaultCollation)
 
@@ -116,7 +116,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         operation.getSort() == sort
         operation.getProjection() == projection
         operation.upsert == true
-        operation.getMaxTime(TimeUnit.SECONDS) == 1
         operation.getBypassDocumentValidation()
         !operation.isReturnOriginal()
         operation.getCollation() == defaultCollation
@@ -130,10 +129,12 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         def projection = new BsonDocument('projection', new BsonInt32(1))
 
         when:
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec,
-                new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1)))))
-                        .filter(filter).sort(sort).projection(projection)
-                .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1)))))
+                .filter(filter)
+                .sort(sort)
+                .projection(projection)
+                .bypassDocumentValidation(true).upsert(true)
                 .returnOriginal(false)
                 .collation(defaultCollation)
 
@@ -142,7 +143,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         operation.getSort() == sort
         operation.getProjection() == projection
         operation.upsert == true
-        operation.getMaxTime(TimeUnit.SECONDS) == 1
         operation.getBypassDocumentValidation()
         !operation.isReturnOriginal()
         operation.getCollation() == defaultCollation
@@ -158,7 +158,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1)))
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
         Document returnedDocument = execute(operation, async)
 
@@ -169,7 +170,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1)))
-        operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, update)
+        operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
                 .returnOriginal(false)
         returnedDocument = execute(operation, async)
@@ -223,7 +225,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1)))
-        def operation = new FindAndUpdateOperation<Worker>(getNamespace(), ACKNOWLEDGED, false, workerCodec, update)
+        def operation = new FindAndUpdateOperation<Worker>(getNamespace(), ACKNOWLEDGED, false,
+                workerCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
         Worker returnedDocument = execute(operation, async)
 
@@ -234,7 +237,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1)))
-        operation = new FindAndUpdateOperation<Worker>(getNamespace(), ACKNOWLEDGED, false, workerCodec, update)
+        operation = new FindAndUpdateOperation<Worker>(getNamespace(), ACKNOWLEDGED, false,
+                workerCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
                 .returnOriginal(false)
         returnedDocument = execute(operation, async)
@@ -287,7 +291,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
     def 'should throw an exception if update contains fields that are not update operators'() {
         given:
         def update = new BsonDocument('x', new BsonInt32(1))
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, update)
 
         when:
         execute(operation, async)
@@ -333,7 +338,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         def update = new BsonDocument('$inc', new BsonDocument('level', new BsonInt32(-1)))
-        def operation = new FindAndUpdateOperation<Document>(namespace, ACKNOWLEDGED, false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(namespace, ACKNOWLEDGED, false,
+                documentCodec, update)
         execute(operation, async)
 
         then:
@@ -368,7 +374,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1)))
 
         when:
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), new WriteConcern(5, 1), false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(),
+                new WriteConcern(5, 1), false, documentCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
         execute(operation, async)
 
@@ -381,7 +388,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         ex.writeResult.upsertedId == null
 
         when:
-        operation = new FindAndUpdateOperation<Document>(getNamespace(), new WriteConcern(5, 1), false, documentCodec, update)
+        operation = new FindAndUpdateOperation<Document>(getNamespace(), new WriteConcern(5, 1), false,
+                documentCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Bob')))
                 .upsert(true)
         execute(operation, async)
@@ -410,7 +418,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         configureFailPoint(failPoint)
 
         def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1)))
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
 
         when:
@@ -461,12 +470,10 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
                 .sort(sort)
                 .projection(projection)
                 .bypassDocumentValidation(true)
-                .maxTime(10, TimeUnit.MILLISECONDS)
 
         expectedCommand.append('query', filter)
                 .append('sort', sort)
                 .append('fields', projection)
-                .append('maxTimeMS', new BsonInt64(10))
 
         operation.collation(defaultCollation)
         expectedCommand.append('collation', defaultCollation.asDocument())
@@ -496,7 +503,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
 
         when:
         def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1)))
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, true, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, true,
+                documentCodec, update)
                 .filter(new BsonDocument('name', new BsonString('Pete')))
 
         enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}'))
@@ -519,7 +527,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         when:
         def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec()))
         def update = BsonDocument.parse('{ update: 1}')
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, true, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, true,
+                documentCodec, update)
         def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName()))
                 .append('update', update)
                 .append('txnNumber', new BsonInt64(0))
@@ -535,7 +544,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
     def 'should throw original error when retrying and failing'() {
         given:
         def update = BsonDocument.parse('{ update: 1}')
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, true, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, true,
+                documentCodec, update)
         def originalException = new MongoSocketException('Some failure', new ServerAddress())
 
         when:
@@ -564,7 +574,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         def document = Document.parse('{_id: 1, str: "foo"}')
         getCollectionHelper().insertDocuments(document)
         def update = BsonDocument.parse('{ $set: {str: "bar"}}')
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, update)
                 .filter(BsonDocument.parse('{str: "FOO"}'))
                 .collation(caseInsensitiveCollation)
 
@@ -586,7 +597,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati
         getCollectionHelper().insertDocuments(documentOne, documentTwo)
         def update = BsonDocument.parse('{ $set: {"y.$[i].b": 2}}')
         def arrayFilters = [BsonDocument.parse('{"i.b": 3}')]
-        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false, documentCodec, update)
+        def operation = new FindAndUpdateOperation<Document>(getNamespace(), ACKNOWLEDGED, false,
+                documentCodec, update)
                 .returnOriginal(false)
                 .arrayFilters(arrayFilters)
 
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy
index 3bd84accd6f..f70cac7b6ad 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy
@@ -17,7 +17,6 @@
 package com.mongodb.internal.operation
 
 import com.mongodb.ClusterFixture
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNamespace
 import com.mongodb.MongoQueryException
 import com.mongodb.OperationFunctionalSpecification
@@ -31,7 +30,7 @@ import com.mongodb.connection.ClusterId
 import com.mongodb.connection.ConnectionDescription
 import com.mongodb.connection.ConnectionId
 import com.mongodb.connection.ServerId
-import com.mongodb.internal.IgnorableRequestContext
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.binding.AsyncClusterBinding
 import com.mongodb.internal.binding.AsyncConnectionSource
 import com.mongodb.internal.binding.AsyncReadBinding
@@ -53,10 +52,10 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.executeSync
+import static com.mongodb.ClusterFixture.getAsyncBinding
 import static com.mongodb.ClusterFixture.getAsyncCluster
 import static com.mongodb.ClusterFixture.getBinding
 import static com.mongodb.ClusterFixture.getCluster
@@ -69,8 +68,6 @@ import static com.mongodb.CursorType.TailableAwait
 import static com.mongodb.connection.ServerType.STANDALONE
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand
 import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION
-import static java.util.concurrent.TimeUnit.MILLISECONDS
-import static java.util.concurrent.TimeUnit.SECONDS
 import static org.junit.Assert.assertEquals
 
 class FindOperationSpecification extends OperationFunctionalSpecification {
@@ -86,8 +83,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         operation.getNamespace() == getNamespace()
         operation.getDecoder() == decoder
         operation.getFilter() == null
-        operation.getMaxTime(MILLISECONDS) == 0
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
         operation.getHint() == null
         operation.getLimit() == 0
         operation.getSkip() == 0
@@ -107,8 +102,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
 
         when:
         FindOperation operation = new FindOperation<Document>(getNamespace(), new DocumentCodec())
-                .maxTime(10, SECONDS)
-                .maxAwaitTime(20, SECONDS)
                 .filter(filter)
                 .limit(20)
                 .skip(30)
@@ -123,8 +116,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
 
         then:
         operation.getFilter() == filter
-        operation.getMaxTime(MILLISECONDS) == 10000
-        operation.getMaxAwaitTime(MILLISECONDS) == 20000
         operation.getLimit() == 20
         operation.getSkip() == 30
         operation.getHint() == hint
@@ -166,7 +157,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         where:
         [async, operation] << [
                 [true, false],
-                [new FindOperation<Document>(getNamespace(), new DocumentCodec()).filter(new BsonDocument('_id', new BsonInt32(1)))]
+                [new FindOperation<Document>(getNamespace(), new DocumentCodec())
+                         .filter(new BsonDocument('_id', new BsonInt32(1)))]
         ].combinations()
     }
 
@@ -186,7 +178,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         where:
         [async, operation] << [
                 [true, false],
-                [new FindOperation<Document>(getNamespace(), new DocumentCodec()).sort(new BsonDocument('_id', new BsonInt32(1)))]
+                [new FindOperation<Document>(getNamespace(), new DocumentCodec())
+                         .sort(new BsonDocument('_id', new BsonInt32(1)))]
         ].combinations()
     }
 
@@ -308,29 +301,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         async << [true, false]
     }
 
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document())
-
-        enableMaxTimeFailPoint()
-
-        when:
-        execute(operation, async)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-
-        where:
-        [async, operation] << [
-                [true, false],
-                [new FindOperation<Document>(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS)]
-        ].combinations()
-    }
-
     def '$max should limit items returned'() {
         given:
         (1..100).each {
@@ -417,8 +387,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
     def 'should apply comment'() {
         given:
         def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile'))
-        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec())
-                .execute(getBinding())
+        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)),
+                new BsonDocumentCodec()).execute(getBinding())
         def expectedComment = 'this is a comment'
         def operation = new FindOperation<Document>(getNamespace(), new DocumentCodec())
                 .comment(new BsonString(expectedComment))
@@ -437,7 +407,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         }
 
         cleanup:
-        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec())
+        new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)),
+                new BsonDocumentCodec())
                 .execute(getBinding())
         profileCollectionHelper.drop()
 
@@ -468,10 +439,9 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         given:
         collectionHelper.insertDocuments(new DocumentCodec(), new Document())
         def operation = new FindOperation<Document>(getNamespace(), new DocumentCodec())
-        def syncBinding = new ClusterBinding(getCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, null,
-                IgnorableRequestContext.INSTANCE)
-        def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, null,
-                IgnorableRequestContext.INSTANCE)
+        def syncBinding = new ClusterBinding(getCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)
+        def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT,
+                OPERATION_CONTEXT)
 
         when:
         def result = async ? executeAsync(operation, asyncBinding) : executeSync(operation, syncBinding)
@@ -495,9 +465,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         def hedgeOptions = isHedgeEnabled != null ?
                 ReadPreferenceHedgeOptions.builder().enabled(isHedgeEnabled as boolean).build() : null
         def readPreference = ReadPreference.primaryPreferred().withHedgeOptions(hedgeOptions)
-        def syncBinding = new ClusterBinding(getCluster(), readPreference, ReadConcern.DEFAULT, null, IgnorableRequestContext.INSTANCE)
-        def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), readPreference, ReadConcern.DEFAULT, null,
-                IgnorableRequestContext.INSTANCE)
+        def syncBinding = new ClusterBinding(getCluster(), readPreference, ReadConcern.DEFAULT, OPERATION_CONTEXT)
+        def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), readPreference, ReadConcern.DEFAULT, OPERATION_CONTEXT)
         def cursor = async ? executeAsync(operation, asyncBinding) : executeSync(operation, syncBinding)
         def firstBatch = {
             if (async) {
@@ -518,16 +487,16 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should add read concern to command'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(ReadBinding)
         def source = Stub(ConnectionSource)
         def connection = Mock(Connection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.readConnectionSource >> source
-        binding.sessionContext >> sessionContext
         source.connection >> connection
         source.retain() >> source
-        source.getServerApi() >> null
+        source.operationContext >> operationContext
         def commandDocument = new BsonDocument('find', new BsonString(getCollectionName()))
         appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument)
 
@@ -539,7 +508,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                  6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.command(_, commandDocument, _, _, _, binding) >>
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
                 new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                         .append('ns', new BsonString(getNamespace().getFullName()))
                         .append('firstBatch', new BsonArrayWrapper([])))
@@ -558,14 +527,14 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should add read concern to command asynchronously'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(AsyncReadBinding)
         def source = Stub(AsyncConnectionSource)
         def connection = Mock(AsyncConnection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
-        binding.sessionContext >> sessionContext
-        source.serverApi >> null
+        source.operationContext >> operationContext
         source.getConnection(_) >> { it[0].onResult(connection, null) }
         source.retain() >> source
         def commandDocument = new BsonDocument('find', new BsonString(getCollectionName()))
@@ -579,7 +548,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                  6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> {
+        1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> {
             it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                     .append('ns', new BsonString(getNamespace().getFullName()))
                     .append('firstBatch', new BsonArrayWrapper([]))), null)
@@ -599,16 +568,16 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should add allowDiskUse to command if the server version >= 3.2'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(ReadBinding)
         def source = Stub(ConnectionSource)
         def connection = Mock(Connection)
         binding.readPreference >> ReadPreference.primary()
         binding.readConnectionSource >> source
-        binding.serverApi >> null
-        binding.sessionContext >> sessionContext
+        binding.operationContext >> operationContext
         source.connection >> connection
         source.retain() >> source
-        source.getServerApi() >> null
+        source.operationContext >> operationContext
         def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE)
         appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument)
 
@@ -620,7 +589,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.command(_, commandDocument, _, _, _, binding) >>
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
                 new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                         .append('ns', new BsonString(getNamespace().getFullName()))
                         .append('firstBatch', new BsonArrayWrapper([])))
@@ -639,14 +608,14 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
 
     def 'should add allowDiskUse to command if the server version >= 3.2 asynchronously'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(AsyncReadBinding)
         def source = Stub(AsyncConnectionSource)
         def connection = Mock(AsyncConnection)
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.readPreference >> ReadPreference.primary()
         binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
-        binding.sessionContext >> sessionContext
-        source.serverApi >> null
+        source.operationContext >> operationContext
         source.getConnection(_) >> { it[0].onResult(connection, null) }
         source.retain() >> source
         def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE)
@@ -660,7 +629,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                 6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> {
+        1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> {
             it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1))
                     .append('ns', new BsonString(getNamespace().getFullName()))
                     .append('firstBatch', new BsonArrayWrapper([]))), null)
@@ -681,17 +650,24 @@ class FindOperationSpecification extends OperationFunctionalSpecification {
     //  sanity check that the server accepts tailable and await data flags
     def 'should pass tailable and await data flags through'() {
         given:
-        def (cursorType, maxAwaitTimeMS, maxTimeMSForCursor) = cursorDetails
+        def (cursorType, long maxAwaitTimeMS, long maxTimeMSForCursor) = cursorDetails
+        def timeoutSettings = ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT.withMaxAwaitTimeMS(maxAwaitTimeMS)
+        def timeoutContext = Spy(TimeoutContext, constructorArgs: [timeoutSettings])
+        def operationContext = OPERATION_CONTEXT.withTimeoutContext(timeoutContext)
+
         collectionHelper.create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000))
         def operation = new FindOperation<BsonDocument>(namespace, new BsonDocumentCodec())
                 .cursorType(cursorType)
-                .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS)
 
         when:
-        def cursor = execute(operation, async)
+        if (async) {
+            execute(operation, getAsyncBinding(operationContext))
+        } else {
+            execute(operation, getBinding(operationContext))
+        }
 
         then:
-        cursor.maxTimeMS == maxTimeMSForCursor
+        1 * timeoutContext.setMaxTimeOverride(maxTimeMSForCursor)
 
         where:
         [async, cursorDetails] << [
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
index 38c267dd3f7..07a3fadc5fd 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
@@ -17,12 +17,12 @@
 package com.mongodb.internal.operation
 
 
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNamespace
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ReadPreference
 import com.mongodb.ServerAddress
 import com.mongodb.ServerCursor
+import com.mongodb.WriteConcern
 import com.mongodb.async.FutureResultCallback
 import com.mongodb.client.model.CreateCollectionOptions
 import com.mongodb.connection.ConnectionDescription
@@ -45,14 +45,11 @@ import org.bson.codecs.Decoder
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.getBinding
-import static com.mongodb.ClusterFixture.isSharded
 import static com.mongodb.ClusterFixture.serverVersionAtLeast
 import static com.mongodb.ClusterFixture.serverVersionLessThan
-import static java.util.concurrent.TimeUnit.MILLISECONDS
 
 class ListCollectionsOperationSpecification extends OperationFunctionalSpecification {
 
@@ -260,7 +257,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
 
     def 'should filter indexes when calling hasNext before next'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
 
@@ -276,7 +273,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
 
     def 'should filter indexes without calling hasNext before next'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
 
@@ -298,7 +295,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
 
     def 'should filter indexes when calling hasNext before tryNext'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
 
@@ -320,7 +317,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
 
     def 'should filter indexes without calling hasNext before tryNext'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
 
@@ -337,7 +334,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
 
     def 'should filter indexes asynchronously'() {
         given:
-        new DropDatabaseOperation(databaseName).execute(getBinding())
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
         addSeveralIndexes()
         def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
 
@@ -413,55 +410,18 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
         cursor?.close()
     }
 
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document())
-        def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).maxTime(1000, MILLISECONDS)
-
-        enableMaxTimeFailPoint()
-
-        when:
-        operation.execute(getBinding())
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-    }
-
-
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from executeAsync'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document())
-        def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).maxTime(1000, MILLISECONDS)
-
-        enableMaxTimeFailPoint()
-
-        when:
-        executeAsync(operation)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-    }
-
     def 'should use the readPreference to set secondaryOk'() {
         given:
         def connection = Mock(Connection)
         def connectionSource = Stub(ConnectionSource) {
-            getServerApi() >> null
-            getReadPreference() >> readPreference
             getConnection() >> connection
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def readBinding = Stub(ReadBinding) {
             getReadConnectionSource() >> connectionSource
             getReadPreference() >> readPreference
-            getServerApi() >> null
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def operation = new ListCollectionsOperation(helper.dbName, helper.decoder)
 
@@ -470,7 +430,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
 
         then:
         _ * connection.getDescription() >> helper.threeSixConnectionDescription
-        1 * connection.command(_, _, _, readPreference, _, readBinding) >> helper.commandResult
+        1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult
         1 * connection.release()
 
         where:
@@ -481,14 +441,14 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
         given:
         def connection = Mock(AsyncConnection)
         def connectionSource = Stub(AsyncConnectionSource) {
-            getReadPreference() >> readPreference
-            getServerApi() >> null
             getConnection(_) >> { it[0].onResult(connection, null) }
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def readBinding = Stub(AsyncReadBinding) {
-            getReadPreference() >> readPreference
-            getServerApi() >> null
             getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def operation = new ListCollectionsOperation(helper.dbName, helper.decoder)
 
@@ -497,7 +457,8 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica
 
         then:
         _ * connection.getDescription() >> helper.threeSixConnectionDescription
-        1 * connection.commandAsync(helper.dbName, _, _, readPreference, *_) >> { it.last().onResult(helper.commandResult, null) }
+        1 * connection.commandAsync(helper.dbName, _, _, readPreference, _, OPERATION_CONTEXT, *_) >> {
+            it.last().onResult(helper.commandResult, null) }
 
         where:
         readPreference << [ReadPreference.primary(), ReadPreference.secondary()]
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy
index 95afad40957..740f9073dcd 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy
@@ -17,7 +17,6 @@
 package com.mongodb.internal.operation
 
 
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ReadPreference
 import com.mongodb.connection.ConnectionDescription
@@ -33,14 +32,8 @@ import org.bson.BsonRegularExpression
 import org.bson.Document
 import org.bson.codecs.Decoder
 import org.bson.codecs.DocumentCodec
-import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.executeAsync
-import static com.mongodb.ClusterFixture.getBinding
-import static com.mongodb.ClusterFixture.isSharded
-import static java.util.concurrent.TimeUnit.MILLISECONDS
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 
 class ListDatabasesOperationSpecification extends OperationFunctionalSpecification {
     def codec = new DocumentCodec()
@@ -75,55 +68,18 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati
         async << [true, false]
     }
 
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document())
-        def operation = new ListDatabasesOperation(codec).maxTime(1000, MILLISECONDS)
-
-        enableMaxTimeFailPoint()
-
-        when:
-        operation.execute(getBinding())
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-    }
-
-
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from executeAsync'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document())
-        def operation = new ListDatabasesOperation(codec).maxTime(1000, MILLISECONDS)
-
-        enableMaxTimeFailPoint()
-
-        when:
-        executeAsync(operation)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-    }
-
     def 'should use the readPreference to set secondaryOk'() {
         given:
         def connection = Mock(Connection)
         def connectionSource = Stub(ConnectionSource) {
-            getReadPreference() >> readPreference
-            getServerApi() >> null
             getConnection() >> connection
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def readBinding = Stub(ReadBinding) {
             getReadConnectionSource() >> connectionSource
             getReadPreference() >> readPreference
-            getServerApi() >> null
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def operation = new ListDatabasesOperation(helper.decoder)
 
@@ -132,7 +88,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati
 
         then:
         _ * connection.getDescription() >> helper.connectionDescription
-        1 * connection.command(_, _, _, readPreference, _, readBinding) >> helper.commandResult
+        1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult
         1 * connection.release()
 
         where:
@@ -148,7 +104,6 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati
         }
         def readBinding = Stub(AsyncReadBinding) {
             getReadPreference() >> readPreference
-            getServerApi() >> null
             getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
         }
         def operation = new ListDatabasesOperation(helper.decoder)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy
index 51280de9b45..462bf367e50 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy
@@ -17,7 +17,6 @@
 package com.mongodb.internal.operation
 
 
-import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNamespace
 import com.mongodb.OperationFunctionalSpecification
 import com.mongodb.ReadPreference
@@ -42,14 +41,10 @@ import org.bson.BsonString
 import org.bson.Document
 import org.bson.codecs.Decoder
 import org.bson.codecs.DocumentCodec
-import spock.lang.IgnoreIf
 
-import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint
-import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.getBinding
-import static com.mongodb.ClusterFixture.isSharded
-import static java.util.concurrent.TimeUnit.MILLISECONDS
 
 class ListIndexesOperationSpecification extends OperationFunctionalSpecification {
 
@@ -116,8 +111,8 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification
         def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec())
         collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1)))
         collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1)))
-        new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)])
-                .execute(getBinding())
+        new CreateIndexesOperation(namespace,
+                [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding())
 
         when:
         BatchCursor cursor = operation.execute(getBinding())
@@ -136,8 +131,8 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification
         def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec())
         collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1)))
         collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1)))
-        new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)])
-                .execute(getBinding())
+        new CreateIndexesOperation(namespace,
+                [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding())
 
         when:
         def cursor = executeAsync(operation)
@@ -212,56 +207,18 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification
         cursor?.close()
     }
 
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from execute'() {
-        given:
-        def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS)
-        collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1)))
-
-        enableMaxTimeFailPoint()
-
-        when:
-        operation.execute(getBinding())
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-    }
-
-
-    @IgnoreIf({ isSharded() })
-    def 'should throw execution timeout exception from executeAsync'() {
-        given:
-        def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS)
-        collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1)))
-
-        enableMaxTimeFailPoint()
-
-        when:
-        executeAsync(operation)
-
-        then:
-        thrown(MongoExecutionTimeoutException)
-
-        cleanup:
-        disableMaxTimeFailPoint()
-    }
-
-
     def 'should use the readPreference to set secondaryOk'() {
         given:
         def connection = Mock(Connection)
         def connectionSource = Stub(ConnectionSource) {
-            getServerApi() >> null
-            getReadPreference() >> readPreference
             getConnection() >> connection
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def readBinding = Stub(ReadBinding) {
-            getServerApi() >> null
             getReadConnectionSource() >> connectionSource
             getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def operation = new ListIndexesOperation(helper.namespace, helper.decoder)
 
@@ -270,7 +227,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification
 
         then:
         _ * connection.getDescription() >> helper.threeSixConnectionDescription
-        1 * connection.command(_, _, _, readPreference, _, readBinding) >> helper.commandResult
+        1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult
         1 * connection.release()
 
         where:
@@ -285,7 +242,6 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification
             getConnection(_) >> { it[0].onResult(connection, null) }
         }
         def readBinding = Stub(AsyncReadBinding) {
-            getServerApi() >> null
             getReadPreference() >> readPreference
             getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
         }
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy
index 5332eb34339..f7eb191773f 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy
@@ -29,7 +29,6 @@ import org.bson.BsonBoolean
 import org.bson.BsonDocument
 import org.bson.BsonDouble
 import org.bson.BsonInt32
-import org.bson.BsonInt64
 import org.bson.BsonJavaScript
 import org.bson.BsonString
 import org.bson.Document
@@ -42,15 +41,14 @@ import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet
 import static com.mongodb.ClusterFixture.serverVersionAtLeast
 import static com.mongodb.ClusterFixture.serverVersionLessThan
 import static com.mongodb.client.model.Filters.gte
-import static java.util.concurrent.TimeUnit.MILLISECONDS
 
 class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpecification {
     def mapReduceInputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceInput')
     def mapReduceOutputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceOutput')
     def mapReduceOperation = new MapReduceToCollectionOperation(mapReduceInputNamespace,
-                                                                new BsonJavaScript('function(){ emit( this.name , 1 ); }'),
-                                                                new BsonJavaScript('function(key, values){ return values.length; }'),
-                                                                mapReduceOutputNamespace.getCollectionName())
+            new BsonJavaScript('function(){ emit( this.name , 1 ); }'),
+            new BsonJavaScript('function(key, values){ return values.length; }'),
+            mapReduceOutputNamespace.getCollectionName(), null)
     def expectedResults = [new BsonDocument('_id', new BsonString('Pete')).append('value', new BsonDouble(2.0)),
                            new BsonDocument('_id', new BsonString('Sam')).append('value', new BsonDouble(1.0))] as Set
     def helper = new CollectionHelper<BsonDocument>(new BsonDocumentCodec(), mapReduceOutputNamespace)
@@ -64,8 +62,9 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
     }
 
     def cleanup() {
-        new DropCollectionOperation(mapReduceInputNamespace).execute(getBinding())
-        new DropCollectionOperation(mapReduceOutputNamespace).execute(getBinding())
+        new DropCollectionOperation(mapReduceInputNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding())
+        new DropCollectionOperation(mapReduceOutputNamespace, WriteConcern.ACKNOWLEDGED)
+                .execute(getBinding())
     }
 
     def 'should have the correct defaults'() {
@@ -75,7 +74,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
         def out = 'outCollection'
 
         when:
-        def operation =  new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out)
+        def operation =  new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, null)
 
         then:
         operation.getMapFunction() == mapF
@@ -89,7 +88,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
         operation.getLimit() == 0
         operation.getScope() == null
         operation.getSort() == null
-        operation.getMaxTime(MILLISECONDS) == 0
         operation.getBypassDocumentValidation() == null
         operation.getCollation() == null
         !operation.isJsMode()
@@ -118,7 +116,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
                 .limit(10)
                 .scope(scope)
                 .sort(sort)
-                .maxTime(1, MILLISECONDS)
                 .bypassDocumentValidation(true)
                 .collation(defaultCollation)
 
@@ -133,7 +130,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
         operation.getLimit() == 10
         operation.getScope() == scope
         operation.getSort() == sort
-        operation.getMaxTime(MILLISECONDS) == 1
         operation.getBypassDocumentValidation() == true
         operation.getCollation() == defaultCollation
     }
@@ -183,7 +179,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
         def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace,
                 new BsonJavaScript('function(){ emit( "level" , 1 ); }'),
                 new BsonJavaScript('function(key, values){ return values.length; }'),
-                'collectionOut')
+                'collectionOut', null)
         execute(operation, async)
 
         then:
@@ -246,7 +242,8 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
         def dbName = 'dbName'
 
         when:
-        def operation =  new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, WriteConcern.MAJORITY)
+        def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out,
+                WriteConcern.MAJORITY)
         def expectedCommand = new BsonDocument('mapReduce', new BsonString(getCollectionName()))
                 .append('map', mapF)
                 .append('reduce', reduceF)
@@ -261,14 +258,15 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
                 ReadPreference.primary(), false)
 
         when:
-        operation.action(action)
+        operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out,
+                WriteConcern.MAJORITY)
+                .action(action)
                 .databaseName(dbName)
                 .finalizeFunction(finalizeF)
                 .filter(filter)
                 .limit(10)
                 .scope(scope)
                 .sort(sort)
-                .maxTime(10, MILLISECONDS)
                 .bypassDocumentValidation(true)
                 .verbose(true)
 
@@ -279,7 +277,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
                 .append('scope', scope)
                 .append('verbose', BsonBoolean.TRUE)
                 .append('limit', new BsonInt32(10))
-                .append('maxTimeMS', new BsonInt64(10))
 
         if (includeCollation) {
             operation.collation(defaultCollation)
@@ -310,7 +307,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe
         def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace,
                 new BsonJavaScript('function(){ emit( this._id, this.str ); }'),
                 new BsonJavaScript('function(key, values){ return values; }'),
-                'collectionOut')
+                'collectionOut', null)
                 .filter(BsonDocument.parse('{str: "FOO"}'))
                 .collation(caseInsensitiveCollation)
 
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy
index 28986a76e33..3289f10f578 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy
@@ -37,7 +37,6 @@ import org.bson.BsonBoolean
 import org.bson.BsonDocument
 import org.bson.BsonDouble
 import org.bson.BsonInt32
-import org.bson.BsonInt64
 import org.bson.BsonJavaScript
 import org.bson.BsonString
 import org.bson.BsonTimestamp
@@ -46,17 +45,16 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.DocumentCodec
 import spock.lang.IgnoreIf
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ClusterFixture.executeAsync
 import static com.mongodb.ClusterFixture.serverVersionLessThan
 import static com.mongodb.connection.ServerType.STANDALONE
 import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand
 import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION
-import static java.util.concurrent.TimeUnit.MILLISECONDS
 
 class MapReduceWithInlineResultsOperationSpecification extends OperationFunctionalSpecification {
     private final bsonDocumentCodec = new BsonDocumentCodec()
-    def mapReduceOperation = new MapReduceWithInlineResultsOperation<BsonDocument>(
-            getNamespace(),
+    def mapReduceOperation = new MapReduceWithInlineResultsOperation<BsonDocument>(getNamespace(),
             new BsonJavaScript('function(){ emit( this.name , 1 ); }'),
             new BsonJavaScript('function(key, values){ return values.length; }'),
             bsonDocumentCodec)
@@ -76,7 +74,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
         when:
         def mapF = new BsonJavaScript('function(){ }')
         def reduceF = new BsonJavaScript('function(key, values){ }')
-        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace, mapF, reduceF, bsonDocumentCodec)
+        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace, mapF, reduceF,
+                bsonDocumentCodec)
 
         then:
         operation.getMapFunction() == mapF
@@ -85,7 +84,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
         operation.getFinalizeFunction() == null
         operation.getScope() == null
         operation.getSort() == null
-        operation.getMaxTime(MILLISECONDS) == 0
         operation.getLimit() == 0
         operation.getCollation() == null
         !operation.isJsMode()
@@ -100,7 +98,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
         def finalizeF = new BsonJavaScript('function(key, value){}')
         def mapF = new BsonJavaScript('function(){ }')
         def reduceF = new BsonJavaScript('function(key, values){ }')
-        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace, mapF, reduceF, bsonDocumentCodec)
+        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace,
+                mapF, reduceF, bsonDocumentCodec)
                 .filter(filter)
                 .finalizeFunction(finalizeF)
                 .scope(scope)
@@ -108,7 +107,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
                 .jsMode(true)
                 .verbose(true)
                 .limit(20)
-                .maxTime(10, MILLISECONDS)
                 .collation(defaultCollation)
 
         then:
@@ -118,7 +116,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
         operation.getFinalizeFunction() == finalizeF
         operation.getScope() == scope
         operation.getSort() == sort
-        operation.getMaxTime(MILLISECONDS) == 10
         operation.getLimit() == 20
         operation.getCollation() == defaultCollation
         operation.isJsMode()
@@ -141,8 +138,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
 
     def 'should use the ReadBindings readPreference to set secondaryOk'() {
         when:
-        def operation = new MapReduceWithInlineResultsOperation<Document>(helper.namespace, new BsonJavaScript('function(){ }'),
-                new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+        def operation = new MapReduceWithInlineResultsOperation<Document>(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
 
         then:
         testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.commandResult)
@@ -153,8 +150,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
 
     def 'should create the expected command'() {
         when:
-        def operation = new MapReduceWithInlineResultsOperation<Document>(helper.namespace, new BsonJavaScript('function(){ }'),
-                new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+        def operation = new MapReduceWithInlineResultsOperation<Document>(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
         def expectedCommand = new BsonDocument('mapReduce', new BsonString(helper.namespace.getCollectionName()))
             .append('map', operation.getMapFunction())
             .append('reduce', operation.getReduceFunction())
@@ -171,7 +168,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
                 .jsMode(true)
                 .verbose(true)
                 .limit(20)
-                .maxTime(10, MILLISECONDS)
 
 
         expectedCommand.append('query', operation.getFilter())
@@ -180,7 +176,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
                 .append('finalize', operation.getFinalizeFunction())
                 .append('jsMode', BsonBoolean.TRUE)
                 .append('verbose', BsonBoolean.TRUE)
-                .append('maxTimeMS', new BsonInt64(10))
                 .append('limit', new BsonInt32(20))
 
         if (includeCollation) {
@@ -204,8 +199,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
         given:
         def document = Document.parse('{_id: 1, str: "foo"}')
         getCollectionHelper().insertDocuments(document)
-        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(
-                namespace,
+        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(namespace,
                 new BsonJavaScript('function(){ emit( this.str, 1 ); }'),
                 new BsonJavaScript('function(key, values){ return Array.sum(values); }'),
                 bsonDocumentCodec)
@@ -224,16 +218,16 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
 
     def 'should add read concern to command'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(ReadBinding)
         def source = Stub(ConnectionSource)
         def connection = Mock(Connection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.readConnectionSource >> source
-        binding.sessionContext >> sessionContext
         source.connection >> connection
         source.retain() >> source
-        source.getServerApi() >> null
+        source.operationContext >> operationContext
         def commandDocument = BsonDocument.parse('''
             { "mapReduce" : "coll",
               "map" : { "$code" : "function(){ }" },
@@ -242,8 +236,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
               }''')
         appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument)
 
-        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace, new BsonJavaScript('function(){ }'),
-                new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
 
         when:
         operation.execute(binding)
@@ -251,7 +245,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                  6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.command(_, commandDocument, _, _, _, binding) >>
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
                 new BsonDocument('results', new BsonArrayWrapper([]))
                         .append('counts',
                         new BsonDocument('input', new BsonInt32(0))
@@ -273,14 +267,14 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
 
     def 'should add read concern to command asynchronously'() {
         given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
         def binding = Stub(AsyncReadBinding)
         def source = Stub(AsyncConnectionSource)
         def connection = Mock(AsyncConnection)
         binding.readPreference >> ReadPreference.primary()
-        binding.serverApi >> null
+        binding.operationContext >> operationContext
         binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
-        binding.sessionContext >> sessionContext
-        source.serverApi >> null
+        source.operationContext >> operationContext
         source.getConnection(_) >> { it[0].onResult(connection, null) }
         source.retain() >> source
         def commandDocument = BsonDocument.parse('''
@@ -291,8 +285,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
               }''')
         appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument)
 
-        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace, new BsonJavaScript('function(){ }'),
-                new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+        def operation = new MapReduceWithInlineResultsOperation<BsonDocument>(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
 
         when:
         executeAsync(operation, binding)
@@ -300,7 +294,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction
         then:
         _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
                  6, STANDALONE, 1000, 100000, 100000, [])
-        1 * connection.commandAsync(_, commandDocument, _, _, _, binding, _) >> {
+        1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> {
             it.last().onResult(new BsonDocument('results', new BsonArrayWrapper([]))
                     .append('counts',
                     new BsonDocument('input', new BsonInt32(0))
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy
index 7e7938acfe2..9363f6a1812 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy
@@ -100,8 +100,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
 
     def 'when no document with the same id exists, should insert the document'() {
         given:
-        def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))],
-                ordered, ACKNOWLEDGED, false)
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], ordered, ACKNOWLEDGED, false)
 
         when:
         BulkWriteResult result = execute(operation, async)
@@ -120,7 +120,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
         given:
         def document = new BsonDocument('_id', new BsonInt32(1))
         getCollectionHelper().insertDocuments(document)
-        def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(document)], ordered, ACKNOWLEDGED, false)
+        def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(document)], ordered,
+                ACKNOWLEDGED, false)
 
         when:
         execute(operation, async)
@@ -135,8 +136,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
 
     def 'RawBsonDocument should not generate an _id'() {
         given:
-        def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))],
-                ordered, ACKNOWLEDGED, false)
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))], ordered, ACKNOWLEDGED, false)
 
         when:
         BulkWriteResult result = execute(operation, async)
@@ -399,11 +400,11 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
         given:
         def id = new ObjectId()
         def operation = new MixedBulkWriteOperation(getNamespace(),
-                                             [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)),
-                                                                new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))),
-                                                                REPLACE)
-                                                      .upsert(true)],
-                                             true, ACKNOWLEDGED, false)
+                [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)),
+                        new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))),
+                        REPLACE)
+                         .upsert(true)],
+                true, ACKNOWLEDGED, false)
 
         when:
         execute(operation, async)
@@ -546,15 +547,15 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
         given:
         getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1), new Document('_id', 2))
         def operation = new MixedBulkWriteOperation(getNamespace(),
-                                             [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
-                                                                new BsonDocument('_id', new BsonInt32(1))
-                                                                        .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])),
-                                                                REPLACE),
-                                              new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
-                                                                new BsonDocument('_id', new BsonInt32(2))
-                                                                        .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])),
-                                                                REPLACE)],
-                                             true, ACKNOWLEDGED, false)
+                [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
+                        new BsonDocument('_id', new BsonInt32(1))
+                                .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])),
+                        REPLACE),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
+                         new BsonDocument('_id', new BsonInt32(2))
+                                 .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])),
+                         REPLACE)],
+                true, ACKNOWLEDGED, false)
 
         when:
         BulkWriteResult result = execute(operation, async)
@@ -636,13 +637,14 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
     def 'should handle multi-length runs of UNACKNOWLEDGED insert, update, replace, and remove'() {
         given:
         getCollectionHelper().insertDocuments(getTestInserts())
-        def operation = new MixedBulkWriteOperation(getNamespace(),  getTestWrites(), ordered, UNACKNOWLEDGED, false)
+        def operation = new MixedBulkWriteOperation(getNamespace(),  getTestWrites(), ordered, UNACKNOWLEDGED,
+                false)
         def binding = async ? getAsyncSingleConnectionBinding() : getSingleConnectionBinding()
 
         when:
         def result = execute(operation, binding)
-        execute(new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED,
-                false,), binding)
+        execute(new MixedBulkWriteOperation(namespace,
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED, false,), binding)
 
         then:
         !result.wasAcknowledged()
@@ -710,12 +712,12 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
     def 'error details should have correct index on ordered write failure'() {
         given:
         def operation = new MixedBulkWriteOperation(getNamespace(),
-                                             [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))),
-                                              new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
-                                                                new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))),
-                                                                UPDATE),
-                                              new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // this should fail with index 2
-                                             ], true, ACKNOWLEDGED, false)
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
+                         new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))),
+                         UPDATE),
+                 new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // this should fail with index 2
+                ], true, ACKNOWLEDGED, false)
         when:
         execute(operation, async)
 
@@ -733,12 +735,12 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
         given:
         getCollectionHelper().insertDocuments(getTestInserts())
         def operation = new MixedBulkWriteOperation(getNamespace(),
-                                             [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))),
-                                              new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
-                                                                new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))),
-                                                                UPDATE),
-                                              new InsertRequest(new BsonDocument('_id', new BsonInt32(3))) // this should fail with index 2
-                                             ], false, ACKNOWLEDGED, false)
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
+                         new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))),
+                         UPDATE),
+                 new InsertRequest(new BsonDocument('_id', new BsonInt32(3))) // this should fail with index 2
+                ], false, ACKNOWLEDGED, false)
         when:
         execute(operation, async)
 
@@ -804,8 +806,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
     def 'should throw bulk write exception with a write concern error when wtimeout is exceeded'() {
         given:
         def operation = new MixedBulkWriteOperation(getNamespace(),
-                                             [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))],
-                                             false, new WriteConcern(5, 1), false)
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))],
+                false, new WriteConcern(5, 1), false)
         when:
         execute(operation, async)
 
@@ -823,9 +825,9 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
         given:
         getCollectionHelper().insertDocuments(getTestInserts())
         def operation = new MixedBulkWriteOperation(getNamespace(),
-                                             [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))),
-                                              new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))   // duplicate key
-                                             ], false, new WriteConcern(4, 1), false)
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))),
+                 new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))   // duplicate key
+                ], false, new WriteConcern(4, 1), false)
 
         when:
         execute(operation, async)  // This is assuming that it won't be able to replicate to 4 servers in 1 ms
@@ -926,8 +928,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat
         def collectionHelper = getCollectionHelper(namespace)
         collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions().validationOptions(
                 new ValidationOptions().validator(gte('level', 10))))
-        def operation = new MixedBulkWriteOperation(namespace, [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered,
-                ACKNOWLEDGED, false)
+        def operation = new MixedBulkWriteOperation(namespace,
+                [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered, ACKNOWLEDGED, false)
 
         when:
         execute(operation, async)
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy
index 56c0029786c..043c6de48a3 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy
@@ -35,61 +35,43 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan
 class RenameCollectionOperationSpecification extends OperationFunctionalSpecification {
 
     def cleanup() {
-        new DropCollectionOperation(new MongoNamespace(getDatabaseName(), 'newCollection')).execute(getBinding())
+        new DropCollectionOperation(new MongoNamespace(getDatabaseName(), 'newCollection'),
+                WriteConcern.ACKNOWLEDGED).execute(getBinding())
     }
 
     def 'should return rename a collection'() {
         given:
         getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection'))
         assert collectionNameExists(getCollectionName())
+        def operation = new RenameCollectionOperation(getNamespace(),
+                new MongoNamespace(getDatabaseName(), 'newCollection'), null)
 
         when:
-        new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection')).execute(getBinding())
+        execute(operation, async)
 
         then:
         !collectionNameExists(getCollectionName())
         collectionNameExists('newCollection')
-    }
-
-
-    def 'should return rename a collection asynchronously'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection'))
-        assert collectionNameExists(getCollectionName())
-
-        when:
-        executeAsync(new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection')))
 
-        then:
-        !collectionNameExists(getCollectionName())
-        collectionNameExists('newCollection')
+        where:
+        async << [true, false]
     }
 
     def 'should throw if not drop and collection exists'() {
         given:
         getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection'))
         assert collectionNameExists(getCollectionName())
+        def operation = new RenameCollectionOperation(getNamespace(), getNamespace(), null)
 
         when:
-        new RenameCollectionOperation(getNamespace(), getNamespace()).execute(getBinding())
+        execute(operation, async)
 
         then:
         thrown(MongoServerException)
         collectionNameExists(getCollectionName())
-    }
 
-
-    def 'should throw if not drop and collection exists asynchronously'() {
-        given:
-        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection'))
-        assert collectionNameExists(getCollectionName())
-
-        when:
-        executeAsync(new RenameCollectionOperation(getNamespace(), getNamespace()))
-
-        then:
-        thrown(MongoServerException)
-        collectionNameExists(getCollectionName())
+        where:
+        async << [true, false]
     }
 
     @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() })
@@ -97,8 +79,8 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific
         given:
         getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection'))
         assert collectionNameExists(getCollectionName())
-        def operation = new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection'),
-                new WriteConcern(5))
+        def operation = new RenameCollectionOperation(getNamespace(),
+                new MongoNamespace(getDatabaseName(), 'newCollection'), new WriteConcern(5))
 
         when:
         async ? executeAsync(operation) : operation.execute(getBinding())
@@ -112,7 +94,6 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific
         async << [true, false]
     }
 
-
     def collectionNameExists(String collectionName) {
         def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding())
         if (!cursor.hasNext()) {
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java
index 731f83c3c53..0eeeff8bdb4 100644
--- a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java
@@ -23,19 +23,15 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.ServerCursor;
 import com.mongodb.async.FutureResultCallback;
-import com.mongodb.internal.IgnorableRequestContext;
-import com.mongodb.internal.binding.StaticBindingContext;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.Connection;
-import com.mongodb.internal.connection.NoOpSessionContext;
-import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.validator.NoOpFieldNameValidator;
 import org.bson.BsonDocument;
 import org.bson.BsonInt64;
 import org.bson.BsonString;
 import org.bson.codecs.BsonDocumentCodec;
 
-import static com.mongodb.ClusterFixture.getServerApi;
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 
 final class TestOperationHelper {
 
@@ -60,10 +56,7 @@ static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final Serv
                 connection.command(namespace.getDatabaseName(),
                         new BsonDocument("getMore", new BsonInt64(serverCursor.getId()))
                                 .append("collection", new BsonString(namespace.getCollectionName())),
-                        new NoOpFieldNameValidator(), ReadPreference.primary(),
-                        new BsonDocumentCodec(),
-                        new StaticBindingContext(new NoOpSessionContext(), getServerApi(), IgnorableRequestContext.INSTANCE,
-                                new OperationContext())));
+                        new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT));
     }
 
     static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final ServerCursor serverCursor,
@@ -73,9 +66,7 @@ static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final Serv
             connection.commandAsync(namespace.getDatabaseName(),
                     new BsonDocument("getMore", new BsonInt64(serverCursor.getId()))
                             .append("collection", new BsonString(namespace.getCollectionName())),
-                    new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(),
-                    new StaticBindingContext(new NoOpSessionContext(), getServerApi(), IgnorableRequestContext.INSTANCE,
-                            new OperationContext()), callback);
+                    new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT, callback);
             callback.get();
         });
     }
diff --git a/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java b/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java
new file mode 100644
index 00000000000..226b035151c
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.test;
+
+import com.mongodb.test.extension.FlakyTestExtension;
+import org.junit.jupiter.api.TestInfo;
+import org.junit.jupiter.api.TestTemplate;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.api.parallel.Execution;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static org.junit.jupiter.api.parallel.ExecutionMode.SAME_THREAD;
+
+/**
+ * {@code @FlakyTest} is used to signal that the annotated method contains a flaky / racy test.
+ *
+ * <p>The test will be repeated up to a {@linkplain #maxAttempts maximum number of times} with a
+ * configurable {@linkplain #name display name}. Each invocation will be repeated if the previous test fails.
+ */
+@Target({ElementType.METHOD, ElementType.TYPE})
+@Retention(RetentionPolicy.RUNTIME)
+@Execution(SAME_THREAD) // cannot be run in parallel
+@ExtendWith(FlakyTestExtension.class)
+@TestTemplate
+public @interface FlakyTest {
+
+    /**
+     * Placeholder for the {@linkplain TestInfo#getDisplayName display name} of
+     * a {@code @FlakyTest} method: <code>{displayName}</code>
+     */
+    String DISPLAY_NAME_PLACEHOLDER = "{displayName}";
+
+    /**
+     * Placeholder for the current repetition count of a {@code @FlakyTest}
+     * method: <code>{index}</code>
+     */
+    String CURRENT_REPETITION_PLACEHOLDER = "{index}";
+
+    /**
+     * Placeholder for the total number of repetitions of a {@code @FlakyTest}
+     * method: <code>{totalRepetitions}</code>
+     */
+    String TOTAL_REPETITIONS_PLACEHOLDER = "{totalRepetitions}";
+
+    /**
+     * <em>Short</em> display name pattern for a repeated test: {@value #SHORT_DISPLAY_NAME}
+     *
+     * @see #CURRENT_REPETITION_PLACEHOLDER
+     * @see #TOTAL_REPETITIONS_PLACEHOLDER
+     * @see #LONG_DISPLAY_NAME
+     */
+    String SHORT_DISPLAY_NAME = "Attempt: " + CURRENT_REPETITION_PLACEHOLDER + " / " + TOTAL_REPETITIONS_PLACEHOLDER;
+
+    /**
+     * <em>Long</em> display name pattern for a repeated test: {@value #LONG_DISPLAY_NAME}
+     *
+     * @see #DISPLAY_NAME_PLACEHOLDER
+     * @see #SHORT_DISPLAY_NAME
+     */
+    String LONG_DISPLAY_NAME = DISPLAY_NAME_PLACEHOLDER + " " + SHORT_DISPLAY_NAME;
+
+    /**
+     * The maximum number of times the test will be attempted.
+     *
+     * @return the maximum number of attempts before the test is reported as failed
+     */
+    int maxAttempts() default 1;
+
+    /**
+     * The display name pattern for each attempt of the test method.
+     *
+     * @return the display name pattern; defaults to {@link #LONG_DISPLAY_NAME}
+     */
+    String name() default LONG_DISPLAY_NAME;
+}
diff --git a/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java b/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java
new file mode 100644
index 00000000000..55ddd7a001e
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.test.extension;
+
+import com.mongodb.test.FlakyTest;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.AfterTestExecutionCallback;
+import org.junit.jupiter.api.extension.BeforeTestExecutionCallback;
+import org.junit.jupiter.api.extension.ConditionEvaluationResult;
+import org.junit.jupiter.api.extension.ExecutionCondition;
+import org.junit.jupiter.api.extension.Extension;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.junit.jupiter.api.extension.TestExecutionExceptionHandler;
+import org.junit.jupiter.api.extension.TestInstantiationException;
+import org.junit.jupiter.api.extension.TestTemplateInvocationContext;
+import org.junit.jupiter.api.extension.TestTemplateInvocationContextProvider;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.platform.commons.util.Preconditions;
+import org.opentest4j.TestAbortedException;
+
+import java.lang.reflect.Method;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Spliterator;
+import java.util.stream.Stream;
+
+import static com.mongodb.test.FlakyTest.CURRENT_REPETITION_PLACEHOLDER;
+import static com.mongodb.test.FlakyTest.DISPLAY_NAME_PLACEHOLDER;
+import static com.mongodb.test.FlakyTest.TOTAL_REPETITIONS_PLACEHOLDER;
+import static java.util.Collections.singletonList;
+import static java.util.Spliterators.spliteratorUnknownSize;
+import static java.util.stream.StreamSupport.stream;
+import static org.junit.platform.commons.util.AnnotationUtils.findAnnotation;
+import static org.junit.platform.commons.util.AnnotationUtils.isAnnotated;
+
+
+/**
+ * A {@code TestTemplateInvocationContextProvider} that supports the {@link FlakyTest @FlakyTest} annotation.
+ */
+public class FlakyTestExtension implements TestTemplateInvocationContextProvider,
+        BeforeTestExecutionCallback,
+        AfterTestExecutionCallback,
+        TestExecutionExceptionHandler {
+
+    private int maxAttempts = 0;
+    private FlakyTestDisplayFormatter formatter;
+    private Boolean testHasPassed;
+    private int currentAttempt = 0;
+
+
+    @Override
+    public void afterTestExecution(final ExtensionContext extensionContext) {
+        testHasPassed = extensionContext.getExecutionException().map(e -> e instanceof TestInstantiationException).orElse(true);
+    }
+
+    @Override
+    public boolean supportsTestTemplate(final ExtensionContext context) {
+        return isAnnotated(context.getTestMethod(), FlakyTest.class);
+    }
+
+    @Override
+    public Stream<TestTemplateInvocationContext> provideTestTemplateInvocationContexts(final ExtensionContext context) {
+        Method testMethod = context.getRequiredTestMethod();
+        String displayName = context.getDisplayName();
+
+        if (isAnnotated(testMethod, Test.class)) {
+            throw new TestInstantiationException(String.format("Test %s also annotated with @Test", displayName));
+        } else if (isAnnotated(testMethod, ParameterizedTest.class)) {
+            throw new TestInstantiationException(String.format("Test %s also annotated with @ParameterizedTest", displayName));
+        }
+
+        FlakyTest flakyTest = findAnnotation(testMethod, FlakyTest.class)
+                .orElseThrow(() ->
+                        new TestInstantiationException("The extension should not be executed unless the test method is "
+                                + "annotated with @FlakyTest."));
+
+        formatter = displayNameFormatter(flakyTest, testMethod, displayName);
+
+        maxAttempts = flakyTest.maxAttempts();
+        Preconditions.condition(maxAttempts > 0, "Total repeats must be higher than 0");
+
+        //Convert logic of repeated handler to spliterator
+        Spliterator<TestTemplateInvocationContext> spliterator =
+                spliteratorUnknownSize(new TestTemplateIterator(), Spliterator.NONNULL);
+        return stream(spliterator, false);
+    }
+
+    private FlakyTestDisplayFormatter displayNameFormatter(final FlakyTest flakyTest, final Method method,
+            final String displayName) {
+        String pattern = Preconditions.notBlank(flakyTest.name().trim(), () -> String.format(
+                "Configuration error: @FlakyTest on method [%s] must be declared with a non-empty name.", method));
+        return new FlakyTestDisplayFormatter(pattern, displayName);
+    }
+
+    @Override
+    public void handleTestExecutionException(final ExtensionContext context, final Throwable throwable) throws Throwable {
+        if (currentAttempt < maxAttempts) {
+            // Mark failure as skipped / aborted so to pass CI
+            throw new TestAbortedException("Test failed on attempt: " + currentAttempt);
+        }
+        throw throwable;
+    }
+
+    @Override
+    public void beforeTestExecution(final ExtensionContext context) {
+        currentAttempt++;
+    }
+
+    /**
+     * Iterator producing one invocation context per attempt, stopping once the test passes or the attempt limit is reached.
+     */
+    class TestTemplateIterator implements Iterator<TestTemplateInvocationContext> {
+        private int currentIndex = 0;
+
+        @Override
+        public boolean hasNext() {
+            if (currentIndex == 0) {
+                return true;
+            }
+            return !testHasPassed && currentIndex < maxAttempts;
+        }
+
+        @Override
+        public TestTemplateInvocationContext next() {
+            if (hasNext()) {
+                currentIndex++;
+                return new RepeatInvocationContext(currentIndex, maxAttempts, formatter);
+            }
+            throw new NoSuchElementException();
+        }
+
+        @Override
+        public void remove() {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    static class RepeatInvocationContext implements TestTemplateInvocationContext {
+        private final int currentRepetition;
+        private final int totalTestRuns;
+        private final FlakyTestDisplayFormatter formatter;
+
+        RepeatInvocationContext(final int currentRepetition, final int totalRepetitions, final FlakyTestDisplayFormatter formatter) {
+            this.currentRepetition = currentRepetition;
+            this.totalTestRuns = totalRepetitions;
+            this.formatter = formatter;
+        }
+
+        @Override
+        public String getDisplayName(final int invocationIndex) {
+            return formatter.format(currentRepetition, totalTestRuns);
+        }
+
+        @Override
+        public List<Extension> getAdditionalExtensions() {
+            return singletonList((ExecutionCondition) context -> {
+                if (currentRepetition > totalTestRuns) {
+                    return ConditionEvaluationResult.disabled("All attempts failed");
+                } else {
+                    return ConditionEvaluationResult.enabled("Test failed - retry");
+                }
+            });
+        }
+    }
+
+    static class FlakyTestDisplayFormatter {
+        private final String pattern;
+        private final String displayName;
+
+        FlakyTestDisplayFormatter(final String pattern, final String displayName) {
+            this.pattern = pattern;
+            this.displayName = displayName;
+        }
+
+        String format(final int currentRepetition, final int totalRepetitions) {
+            return pattern
+                    .replace(DISPLAY_NAME_PLACEHOLDER, displayName)
+                    .replace(CURRENT_REPETITION_PLACEHOLDER, String.valueOf(currentRepetition))
+                    .replace(TOTAL_REPETITIONS_PLACEHOLDER, String.valueOf(totalRepetitions));
+        }
+
+    }
+
+}
diff --git a/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json b/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json
index 443aa0aa232..247541646cc 100644
--- a/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json
+++ b/driver-core/src/test/resources/client-side-encryption/legacy/timeoutMS.json
@@ -4,6 +4,7 @@
       "minServerVersion": "4.4"
     }
   ],
+  "comment": "Updated timeoutMS and blockTimeMS manually to address race conditions in tests with SSL handshake.",
   "database_name": "cse-timeouts-db",
   "collection_name": "cse-timeouts-coll",
   "data": [],
@@ -110,7 +111,7 @@
             "listCollections"
           ],
           "blockConnection": true,
-          "blockTimeMS": 60
+          "blockTimeMS": 250
         }
       },
       "clientOptions": {
@@ -119,7 +120,7 @@
             "aws": {}
           }
         },
-        "timeoutMS": 50
+        "timeoutMS": 200
       },
       "operations": [
         {
@@ -161,7 +162,7 @@
       "failPoint": {
         "configureFailPoint": "failCommand",
         "mode": {
-          "times": 3
+          "times": 2
         },
         "data": {
           "failCommands": [
@@ -169,7 +170,7 @@
             "find"
           ],
           "blockConnection": true,
-          "blockTimeMS": 20
+          "blockTimeMS": 300
         }
       },
       "clientOptions": {
@@ -178,7 +179,7 @@
             "aws": {}
           }
         },
-        "timeoutMS": 50
+        "timeoutMS": 500
       },
       "operations": [
         {
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.md b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.md
new file mode 100644
index 00000000000..b4160500f54
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.md
@@ -0,0 +1,618 @@
+# Client Side Operations Timeouts Tests
+
+______________________________________________________________________
+
+## Introduction
+
+This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests
+are broken up into automated YAML/JSON tests and additional prose tests.
+
+## Spec Tests
+
+This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test
+Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of
+them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute these
+tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another
+with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and
+`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters.
+
+## Prose Tests
+
+There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST
+create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). Any fail points set during a
+test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests
+MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to
+listen for `command_started` events.
+
+### 1. Multi-batch writes
+
+This test MUST only run against standalones on server versions 4.4 and higher. The `insertMany` call takes an
+exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow
+for differing bulk encoding performance.
+
+1. Using `internalClient`, drop the `db.coll` collection.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 2
+       },
+       data: {
+           failCommands: ["insert"],
+           blockConnection: true,
+           blockTimeMS: 1010
+       }
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`.
+
+4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call.
+
+   - Expect this to fail with a timeout error.
+
+5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call.
+
+### 2. maxTimeMS is not set for commands sent to mongocryptd
+
+This test MUST only be run against enterprise server versions 4.2 and higher.
+
+1. Launch a mongocryptd process on 23000.
+2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`.
+3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database.
+4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field.
+
+### 3. ClientEncryption
+
+Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY`
+refers to the following base64:
+
+```javascript
+Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk
+```
+
+For each test, perform the following setup:
+
+1. Using `internalClient`, drop and create the `keyvault.datakeys` collection.
+
+2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`.
+
+3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). Configure this
+   object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map:
+
+   ```javascript
+   {
+       "local": { "key": <base64 decoding of LOCAL_MASTERKEY> }
+   }
+   ```
+
+#### createDataKey
+
+1. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 1
+       },
+       data: {
+           failCommands: ["insert"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+2. Call `clientEncryption.createDataKey()` with the `local` KMS provider.
+
+   - Expect this to fail with a timeout error.
+
+3. Verify that an `insert` command was executed against the `keyvault.datakeys` collection as part of the `createDataKey` call.
+
+#### encrypt
+
+1. Call `client_encryption.createDataKey()` with the `local` KMS provider.
+
+   - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 1
+       },
+       data: {
+           failCommands: ["find"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+3. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm
+   `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`.
+
+   - Expect this to fail with a timeout error.
+
+4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call.
+
+#### decrypt
+
+1. Call `clientEncryption.createDataKey()` with the `local` KMS provider.
+
+   - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`.
+
+2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm
+   `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`.
+
+   - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`.
+
+3. Close and re-create the `keyVaultClient` and `clientEncryption` objects.
+
+4. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 1
+       },
+       data: {
+           failCommands: ["find"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+5. Call `clientEncryption.decrypt()` with the value `encrypted`.
+
+   - Expect this to fail with a timeout error.
+
+6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call.
+
+### 4. Background Connection Pooling
+
+The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields
+(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for
+some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are
+not published within that time.
+
+#### timeoutMS used for handshake commands
+
+1. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: {
+           times: 1
+       },
+       data: {
+           failCommands: ["saslContinue"],
+           blockConnection: true,
+           blockTimeMS: 15,
+           appName: "timeoutBackgroundPoolTest"
+       }
+   }
+   ```
+
+2. Create a MongoClient (referred to as `client`) configured with the following:
+
+   - `minPoolSize` of 1
+   - `timeoutMS` of 10
+   - `appName` of `timeoutBackgroundPoolTest`
+   - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events.
+
+3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published.
+
+#### timeoutMS is refreshed for each handshake command
+
+1. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: "alwaysOn",
+       data: {
+           failCommands: ["hello", "isMaster", "saslContinue"],
+           blockConnection: true,
+           blockTimeMS: 15,
+           appName: "refreshTimeoutBackgroundPoolTest"
+       }
+   }
+   ```
+
+2. Create a MongoClient (referred to as `client`) configured with the following:
+
+   - `minPoolSize` of 1
+   - `timeoutMS` of 20
+   - `appName` of `refreshTimeoutBackgroundPoolTest`
+   - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events.
+
+3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published.
+
+### 5. Blocking Iteration Methods
+
+Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a
+blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an
+error occurs.
+
+#### Tailable cursors
+
+1. Using `internalClient`, drop the `db.coll` collection.
+
+2. Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`.
+
+3. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: "alwaysOn",
+       data: {
+           failCommands: ["getMore"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`.
+
+5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`.
+
+   - Expect this to succeed and return a cursor with a non-zero ID.
+
+6. Call either a blocking or non-blocking iteration method on the cursor.
+
+   - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command.
+
+7. Call the blocking iteration method on the resulting cursor.
+
+   - Expect this to fail with a timeout error.
+
+8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the
+   test.
+
+#### Change Streams
+
+1. Using `internalClient`, drop the `db.coll` collection.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: "alwaysOn",
+       data: {
+           failCommands: ["getMore"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`.
+
+4. Using `client`, use the `watch` helper to create a change stream against `db.coll`.
+
+   - Expect this to succeed and return a change stream with a non-zero ID.
+
+5. Call the blocking iteration method on the resulting change stream.
+
+   - Expect this to fail with a timeout error.
+
+6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during
+   the test.
+
+### 6. GridFS - Upload
+
+Tests in this section MUST only be run against server versions 4.4 and higher.
+
+#### uploads via openUploadStream can be timed out
+
+1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: { times: 1 },
+       data: {
+           failCommands: ["insert"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`.
+
+4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database.
+
+5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as
+   `uploadStream`).
+
+   - Expect this to succeed and return a non-null stream.
+
+6. Using `uploadStream`, upload a single `0x12` byte.
+
+7. Call `uploadStream.close()` to flush the stream and insert chunks.
+
+   - Expect this to fail with a timeout error.
+
+#### Aborting an upload stream can be timed out
+
+This test only applies to drivers that provide an API to abort a GridFS upload stream.
+
+1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: { times: 1 },
+       data: {
+           failCommands: ["delete"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`.
+
+4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with
+   `chunkSizeBytes=2`.
+
+5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as
+   `uploadStream`).
+
+   - Expect this to succeed and return a non-null stream.
+
+6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`.
+
+7. Call `uploadStream.abort()`.
+
+   - Expect this to fail with a timeout error.
+
+### 7. GridFS - Download
+
+This test MUST only be run against server versions 4.4 and higher.
+
+1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections.
+
+2. Using `internalClient`, insert the following document into the `db.fs.files` collection:
+
+   ```javascript
+   {
+      "_id": {
+        "$oid": "000000000000000000000005"
+      },
+      "length": 10,
+      "chunkSize": 4,
+      "uploadDate": {
+        "$date": "1970-01-01T00:00:00.000Z"
+      },
+      "md5": "57d83cd477bfb1ccd975ab33d827a92b",
+      "filename": "length-10",
+      "contentType": "application/octet-stream",
+      "aliases": [],
+      "metadata": {}
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`.
+
+4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database.
+
+5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream
+   (referred to as `downloadStream`).
+
+   - Expect this to succeed and return a non-null stream.
+
+6. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: "failCommand",
+       mode: { times: 1 },
+       data: {
+           failCommands: ["find"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+7. Read from the `downloadStream`.
+
+   - Expect this to fail with a timeout error.
+
+8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against
+   `db.fs.chunks`.
+
+### 8. Server Selection
+
+#### serverSelectionTimeoutMS honored if timeoutMS is not set
+
+1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`.
+2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database.
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS
+
+1. Create a MongoClient (referred to as `client`) with URI
+   `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`.
+2. Using `client`, run the command `{ ping: 1 }` against the `admin` database.
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS
+
+1. Create a MongoClient (referred to as `client`) with URI
+   `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`.
+2. Using `client`, run the command `{ ping: 1 }` against the `admin` database.
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0
+
+1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`.
+2. Using `client`, run the command `{ ping: 1 }` against the `admin` database.
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS
+
+This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username
+and password).
+
+1. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: failCommand,
+       mode: { times: 1 },
+       data: {
+           failCommands: ["saslContinue"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`.
+
+3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`.
+
+   - Expect this to fail with a timeout error after no more than 15ms.
+
+#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS
+
+This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username
+and password).
+
+1. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: failCommand,
+       mode: { times: 1 },
+       data: {
+           failCommands: ["saslContinue"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`.
+
+3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`.
+
+   - Expect this to fail with a timeout error after no more than 15ms.
+
+### 9. endSession
+
+This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run
+three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via
+the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the
+`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds.
+
+1. Using `internalClient`, drop the `db.coll` collection.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: failCommand,
+       mode: { times: 1 },
+       data: {
+           failCommands: ["abortTransaction"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient
+   (referred to as `session`).
+
+4. Execute the following code:
+
+   ```typescript
+   coll = client.database("db").collection("coll")
+   session.start_transaction()
+   coll.insert_one({x: 1}, session=session)
+   ```
+
+5. Using `session`, execute `session.end_session`
+
+   - Expect this to fail with a timeout error after no more than 15ms.
+
+### 10. Convenient Transactions
+
+Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher.
+
+#### timeoutMS is refreshed for abortTransaction if the callback fails
+
+1. Using `internalClient`, drop the `db.coll` collection.
+
+2. Using `internalClient`, set the following fail point:
+
+   ```javascript
+   {
+       configureFailPoint: failCommand,
+       mode: { times: 2 },
+       data: {
+           failCommands: ["insert", "abortTransaction"],
+           blockConnection: true,
+           blockTimeMS: 15
+       }
+   }
+   ```
+
+3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession
+   derived from that MongoClient (referred to as `session`).
+
+4. Using `session`, execute a `withTransaction` operation with the following callback:
+
+   ```typescript
+   def callback() {
+       coll = client.database("db").collection("coll")
+       coll.insert_one({ _id: 1 }, session=session)
+   }
+   ```
+
+5. Expect the previous `withTransaction` call to fail with a timeout error.
+
+6. Verify that the following events were published during the `withTransaction` call:
+
+   1. `command_started` and `command_failed` events for an `insert` command.
+   2. `command_started` and `command_failed` events for an `abortTransaction` command.
+
+## Unit Tests
+
+The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement
+these if it is possible to do so using the driver's existing test infrastructure.
+
+- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set.
+- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a
+  server has been selected.
+- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server
+  has been selected.
+- If a new connection is required to execute an operation,
+  `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment.
+- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should
+  apply to HTTP requests against OCSP responders.
+- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts.
+- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE.
+- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption.
+- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment.
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.rst b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.rst
new file mode 100644
index 00000000000..8a6bba61dac
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/README.rst
@@ -0,0 +1,616 @@
+======================================
+Client Side Operations Timeouts Tests
+======================================
+
+.. contents::
+
+----
+
+Introduction
+============
+
+This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests
+are broken up into automated YAML/JSON tests and additional prose tests.
+
+Spec Tests
+==========
+
+This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test
+Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some
+of them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute
+these tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and
+another with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the ``single-node-auth.json``
+and ``single-node-auth-ssl.json`` files in the ``drivers-evergreen-tools`` repository to create these clusters.
+
+Prose Tests
+===========
+
+There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST
+create a MongoClient without the ``timeoutMS`` option set (referred to as ``internalClient``). Any fail points set
+during a test MUST be unset using ``internalClient`` after the test has been executed. All MongoClient instances
+created for tests MUST be configured with read/write concern ``majority``, read preference ``primary``, and command
+monitoring enabled to listen for ``command_started`` events.
+
+1. Multi-batch writes
+~~~~~~~~~~~~~~~~~~~~~
+
+This test MUST only run against standalones on server versions 4.4 and higher.
+The ``insertMany`` call takes an exceedingly long time on replicasets and sharded
+clusters. Drivers MAY adjust the timeouts used in this test to allow for differing
+bulk encoding performance.
+
+#. Using ``internalClient``, drop the ``db.coll`` collection.
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: {
+               times: 2
+           },
+           data: {
+               failCommands: ["insert"],
+               blockConnection: true,
+               blockTimeMS: 1010
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=2000``.
+#. Using ``client``, insert 50 1-megabyte documents in a single ``insertMany`` call.
+
+   - Expect this to fail with a timeout error.
+
+#. Verify that two ``insert`` commands were executed against ``db.coll`` as part of the ``insertMany`` call.
+
+2. maxTimeMS is not set for commands sent to mongocryptd
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This test MUST only be run against enterprise server versions 4.2 and higher.
+
+#. Launch a mongocryptd process on port 23000.
+#. Create a MongoClient (referred to as ``client``) using the URI ``mongodb://localhost:23000/?timeoutMS=1000``.
+#. Using ``client``, execute the ``{ ping: 1 }`` command against the ``admin`` database.
+#. Verify via command monitoring that the ``ping`` command sent did not contain a ``maxTimeMS`` field.
+
+3. ClientEncryption
+~~~~~~~~~~~~~~~~~~~
+
+Each test under this category MUST only be run against server versions 4.4 and higher. In these tests,
+``LOCAL_MASTERKEY`` refers to the following base64:
+
+.. code:: javascript
+
+  Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk
+
+For each test, perform the following setup:
+
+#. Using ``internalClient``, drop and create the ``keyvault.datakeys`` collection.
+#. Create a MongoClient (referred to as ``keyVaultClient``) with ``timeoutMS=10``.
+#. Create a ``ClientEncryption`` object that wraps ``keyVaultClient`` (referred to as ``clientEncryption``). Configure this object with ``keyVaultNamespace`` set to ``keyvault.datakeys`` and the following KMS providers map:
+
+   .. code:: javascript
+
+       {
+           "local": { "key": <base64 decoding of LOCAL_MASTERKEY> }
+       }
+
+createDataKey
+`````````````
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: {
+               times: 1
+           },
+           data: {
+               failCommands: ["insert"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider.
+
+   - Expect this to fail with a timeout error.
+
+#. Verify that an ``insert`` command was executed against the ``keyvault.datakeys`` collection as part of the ``createDataKey`` call.
+
+encrypt
+```````
+
+#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider.
+
+   - Expect a BSON binary with subtype 4 to be returned, referred to as ``datakeyId``.
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: {
+               times: 1
+           },
+           data: {
+               failCommands: ["find"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``datakeyId``.
+
+   - Expect this to fail with a timeout error.
+
+#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``encrypt`` call.
+
+decrypt
+```````
+
+#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider.
+
+   - Expect this to return a BSON binary with subtype 4, referred to as ``dataKeyId``.
+
+#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``dataKeyId``.
+
+   - Expect this to return a BSON binary with subtype 6, referred to as ``encrypted``.
+
+#. Close and re-create the ``keyVaultClient`` and ``clientEncryption`` objects.
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: {
+               times: 1
+           },
+           data: {
+               failCommands: ["find"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Call ``clientEncryption.decrypt()`` with the value ``encrypted``.
+
+   - Expect this to fail with a timeout error.
+
+#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``decrypt`` call.
+
+4. Background Connection Pooling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication
+fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait
+for some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events
+are not published within that time.
+
+timeoutMS used for handshake commands
+`````````````````````````````````````
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: {
+               times: 1
+           },
+           data: {
+               failCommands: ["saslContinue"],
+               blockConnection: true,
+               blockTimeMS: 15,
+               appName: "timeoutBackgroundPoolTest"
+           }
+       }
+
+#. Create a MongoClient (referred to as ``client``) configured with the following:
+
+   - ``minPoolSize`` of 1
+   - ``timeoutMS`` of 10
+   - ``appName`` of ``timeoutBackgroundPoolTest``
+   - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionClosedEvent`` events.
+
+#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionClosedEvent`` to be published.
+
+timeoutMS is refreshed for each handshake command
+`````````````````````````````````````````````````
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: "alwaysOn",
+           data: {
+               failCommands: ["hello", "isMaster", "saslContinue"],
+               blockConnection: true,
+               blockTimeMS: 15,
+               appName: "refreshTimeoutBackgroundPoolTest"
+           }
+       }
+
+#. Create a MongoClient (referred to as ``client``) configured with the following:
+
+   - ``minPoolSize`` of 1
+   - ``timeoutMS`` of 20
+   - ``appName`` of ``refreshTimeoutBackgroundPoolTest``
+   - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionReady`` events.
+
+#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionReady`` to be published.
+
+5. Blocking Iteration Methods
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a
+blocking method for cursor iteration that executes ``getMore`` commands in a loop until a document is available or an
+error occurs.
+
+Tailable cursors
+````````````````
+
+#. Using ``internalClient``, drop the ``db.coll`` collection.
+#. Using ``internalClient``, insert the document ``{ x: 1 }`` into ``db.coll``.
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: "alwaysOn",
+           data: {
+               failCommands: ["getMore"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``.
+#. Using ``client``, create a tailable cursor on ``db.coll`` with ``cursorType=tailable``.
+
+   - Expect this to succeed and return a cursor with a non-zero ID.
+
+#. Call either a blocking or non-blocking iteration method on the cursor.
+
+   - Expect this to succeed and return the document ``{ x: 1 }`` without sending a ``getMore`` command.
+
+#. Call the blocking iteration method on the resulting cursor.
+
+   - Expect this to fail with a timeout error.
+
+#. Verify that a ``find`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test.
+
+Change Streams
+``````````````
+
+#. Using ``internalClient``, drop the ``db.coll`` collection.
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: "alwaysOn",
+           data: {
+               failCommands: ["getMore"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``.
+#. Using ``client``, use the ``watch`` helper to create a change stream against ``db.coll``.
+
+   - Expect this to succeed and return a change stream with a non-zero ID.
+
+#. Call the blocking iteration method on the resulting change stream.
+
+   - Expect this to fail with a timeout error.
+
+#. Verify that an ``aggregate`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test.
+
+6. GridFS - Upload
+~~~~~~~~~~~~~~~~~~
+
+Tests in this section MUST only be run against server versions 4.4 and higher.
+
+uploads via openUploadStream can be timed out
+`````````````````````````````````````````````
+
+#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections.
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: { times: 1 },
+           data: {
+               failCommands: ["insert"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``.
+#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database.
+#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``).
+
+   - Expect this to succeed and return a non-null stream.
+
+#. Using ``uploadStream``, upload a single ``0x12`` byte.
+#. Call ``uploadStream.close()`` to flush the stream and insert chunks.
+
+   - Expect this to fail with a timeout error.
+
+Aborting an upload stream can be timed out
+``````````````````````````````````````````
+
+This test only applies to drivers that provide an API to abort a GridFS upload stream.
+
+#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections.
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: { times: 1 },
+           data: {
+               failCommands: ["delete"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``.
+#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database with ``chunkSizeBytes=2``.
+#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``).
+
+   - Expect this to succeed and return a non-null stream.
+
+#. Using ``uploadStream``, upload the bytes ``[0x01, 0x02, 0x03, 0x04]``.
+#. Call ``uploadStream.abort()``.
+
+   - Expect this to fail with a timeout error.
+
+7. GridFS - Download
+~~~~~~~~~~~~~~~~~~~~
+
+This test MUST only be run against server versions 4.4 and higher.
+
+#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections.
+#. Using ``internalClient``, insert the following document into the ``db.fs.files`` collection:
+
+   .. code:: javascript
+
+       {
+          "_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "length": 10,
+          "chunkSize": 4,
+          "uploadDate": {
+            "$date": "1970-01-01T00:00:00.000Z"
+          },
+          "md5": "57d83cd477bfb1ccd975ab33d827a92b",
+          "filename": "length-10",
+          "contentType": "application/octet-stream",
+          "aliases": [],
+          "metadata": {}
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``.
+#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database.
+#. Call ``bucket.open_download_stream`` with the id ``{ "$oid": "000000000000000000000005" }`` to create a download stream (referred to as ``downloadStream``).
+
+   - Expect this to succeed and return a non-null stream.
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: { times: 1 },
+           data: {
+               failCommands: ["find"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Read from the ``downloadStream``.
+
+   - Expect this to fail with a timeout error.
+
+#. Verify that two ``find`` commands were executed during the read: one against ``db.fs.files`` and another against ``db.fs.chunks``.
+
+8. Server Selection
+~~~~~~~~~~~~~~~~~~~
+
+serverSelectionTimeoutMS honored if timeoutMS is not set
+````````````````````````````````````````````````````````
+
+#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?serverSelectionTimeoutMS=10``.
+
+#. Using ``client``, execute the command ``{ ping: 1 }`` against the ``admin`` database.
+
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS
+``````````````````````````````````````````````````````````````````````````````````
+
+#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20``.
+
+#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database.
+
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS
+``````````````````````````````````````````````````````````````````````````````````
+
+#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10``.
+
+#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database.
+
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+serverSelectionTimeoutMS honored for server selection if timeoutMS=0
+````````````````````````````````````````````````````````````````````
+
+#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10``.
+
+#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database.
+
+   - Expect this to fail with a server selection timeout error after no more than 15ms.
+
+timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS
+```````````````````````````````````````````````````````````````````````````````````````````````
+
+This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a
+username and password).
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: { times: 1 },
+           data: {
+               failCommands: ["saslContinue"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10`` and ``serverSelectionTimeoutMS=20``.
+#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``.
+
+   - Expect this to fail with a timeout error after no more than 15ms.
+
+serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS
+```````````````````````````````````````````````````````````````````````````````````````````````
+
+This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a
+username and password).
+
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: { times: 1 },
+           data: {
+               failCommands: ["saslContinue"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20`` and ``serverSelectionTimeoutMS=10``.
+#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``.
+
+   - Expect this to fail with a timeout error after no more than 15ms.
+
+9. endSession
+~~~~~~~~~~~~~
+
+This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be
+run three times: once with the timeout specified via the MongoClient ``timeoutMS`` option, once with the timeout
+specified via the ClientSession ``defaultTimeoutMS`` option, and once more with the timeout specified via the
+``timeoutMS`` option for the ``endSession`` operation. In all cases, the timeout MUST be set to 10 milliseconds.
+
+#. Using ``internalClient``, drop the ``db.coll`` collection.
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: { times: 1 },
+           data: {
+               failCommands: ["abortTransaction"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) and an explicit ClientSession derived from that MongoClient (referred to as ``session``).
+#. Execute the following code:
+
+   .. code:: typescript
+
+       coll = client.database("db").collection("coll")
+       session.start_transaction()
+       coll.insert_one({x: 1}, session=session)
+
+#. Using ``session``, execute ``session.end_session``
+
+   - Expect this to fail with a timeout error after no more than 15ms.
+
+10. Convenient Transactions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher.
+
+timeoutMS is refreshed for abortTransaction if the callback fails
+`````````````````````````````````````````````````````````````````
+
+#. Using ``internalClient``, drop the ``db.coll`` collection.
+#. Using ``internalClient``, set the following fail point:
+
+   .. code:: javascript
+
+       {
+           configureFailPoint: "failCommand",
+           mode: { times: 2 },
+           data: {
+               failCommands: ["insert", "abortTransaction"],
+               blockConnection: true,
+               blockTimeMS: 15
+           }
+       }
+
+#. Create a new MongoClient (referred to as ``client``) configured with ``timeoutMS=10`` and an explicit ClientSession derived from that MongoClient (referred to as ``session``).
+#. Using ``session``, execute a ``withTransaction`` operation with the following callback:
+
+   .. code:: typescript
+
+       def callback() {
+           coll = client.database("db").collection("coll")
+           coll.insert_one({ _id: 1 }, session=session)
+       }
+
+#. Expect the previous ``withTransaction`` call to fail with a timeout error.
+#. Verify that the following events were published during the ``withTransaction`` call:
+
+   #. ``command_started`` and ``command_failed`` events for an ``insert`` command.
+   #. ``command_started`` and ``command_failed`` events for an ``abortTransaction`` command.
+
+Unit Tests
+==========
+
+The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement
+these if it is possible to do so using the driver's existing test infrastructure.
+
+- Operations should ignore ``waitQueueTimeoutMS`` if ``timeoutMS`` is also set.
+- If ``timeoutMS`` is set for an operation, the remaining ``timeoutMS`` value should apply to connection checkout after a server has been selected.
+- If ``timeoutMS`` is not set for an operation, ``waitQueueTimeoutMS`` should apply to connection checkout after a server has been selected.
+- If a new connection is required to execute an operation, ``min(remaining computedServerSelectionTimeout, connectTimeoutMS)`` should apply to socket establishment.
+- For drivers that have control over OCSP behavior, ``min(remaining computedServerSelectionTimeout, 5 seconds)`` should apply to HTTP requests against OCSP responders.
+- If ``timeoutMS`` is unset, operations fail after two non-consecutive socket timeouts.
+- The remaining ``timeoutMS`` value should apply to HTTP requests against KMS servers for CSFLE.
+- The remaining ``timeoutMS`` value should apply to commands sent to mongocryptd as part of automatic encryption.
+- When doing ``minPoolSize`` maintenance, ``connectTimeoutMS`` is used as the timeout for socket establishment.
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/bulkWrite.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/bulkWrite.json
new file mode 100644
index 00000000000..9a05809f77c
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/bulkWrite.json
@@ -0,0 +1,160 @@
+{
+  "description": "timeoutMS behaves correctly for bulkWrite operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "uriOptions": {
+          "w": 1
+        }
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS applied to entire bulkWrite, not individual commands",
+      "operations": [
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {}
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert",
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 120
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              },
+              {
+                "replaceOne": {
+                  "filter": {
+                    "_id": 1
+                  },
+                  "replacement": {
+                    "x": 1
+                  }
+                }
+              }
+            ],
+            "timeoutMS": 200
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/change-streams.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/change-streams.json
new file mode 100644
index 00000000000..8cffb08e267
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/change-streams.json
@@ -0,0 +1,598 @@
+{
+  "description": "timeoutMS behaves correctly for change streams",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "error if maxAwaitTimeMS is greater than timeoutMS",
+      "operations": [
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMS": 5,
+            "maxAwaitTimeMS": 10
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "error if maxAwaitTimeMS is equal to timeoutMS",
+      "operations": [
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMS": 5,
+            "maxAwaitTimeMS": 5
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to initial aggregate",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMS": 200
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 30
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMS": 1050
+          },
+          "saveResultAsEntity": "changeStream"
+        },
+        {
+          "name": "iterateOnce",
+          "object": "changeStream"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 150
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMS": 200,
+            "batchSize": 2,
+            "maxAwaitTimeMS": 1
+          },
+          "saveResultAsEntity": "changeStream"
+        },
+        {
+          "name": "iterateOnce",
+          "object": "changeStream"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to full resume attempt in a next call",
+      "operations": [
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMS": 200
+          },
+          "saveResultAsEntity": "changeStream"
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "getMore",
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 120,
+                "errorCode": 7,
+                "errorLabels": [
+                  "ResumableChangeStreamError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "changeStream",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "change stream can be iterated again if previous iteration times out",
+      "operations": [
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "maxAwaitTimeMS": 1,
+            "timeoutMS": 200
+          },
+          "saveResultAsEntity": "changeStream"
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "changeStream",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        },
+        {
+          "name": "iterateOnce",
+          "object": "changeStream"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore - failure",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMS": 200
+          },
+          "saveResultAsEntity": "changeStream"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "changeStream",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json
new file mode 100644
index 00000000000..a8b2d724fa9
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/close-cursors.json
@@ -0,0 +1,239 @@
+{
+  "description": "timeoutMS behaves correctly when closing cursors",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent",
+          "commandSucceededEvent",
+          "commandFailedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": 0
+        },
+        {
+          "_id": 1
+        },
+        {
+          "_id": 2
+        }
+      ]
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS is refreshed for close",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 200
+              }
+            }
+          }
+        },
+        {
+          "name": "createFindCursor",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "batchSize": 2,
+            "timeoutMS": 200
+          },
+          "saveResultAsEntity": "cursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "cursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "cursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "cursor",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        },
+        {
+          "name": "close",
+          "object": "cursor"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find"
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "find"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore"
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "getMore"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "command": {
+                  "killCursors": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                },
+                "commandName": "killCursors"
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "killCursors"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be overridden for close",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "client",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "killCursors"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 30
+              }
+            }
+          }
+        },
+        {
+          "name": "createFindCursor",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "batchSize": 2,
+            "timeoutMS": 20
+          },
+          "saveResultAsEntity": "cursor"
+        },
+        {
+          "name": "close",
+          "object": "cursor",
+          "arguments": {
+            "timeoutMS": 40
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find"
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "find"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "command": {
+                  "killCursors": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                },
+                "commandName": "killCursors"
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "killCursors"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/command-execution.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/command-execution.json
new file mode 100644
index 00000000000..b9b306c7fb6
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/command-execution.json
@@ -0,0 +1,393 @@
+{
+  "description": "timeoutMS behaves correctly during command execution",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.9",
+      "topologies": [
+        "single",
+        "replicaset",
+        "sharded"
+      ],
+      "serverless": "forbid"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    },
+    {
+      "collectionName": "timeoutColl",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "maxTimeMS value in the command is less than timeoutMS",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": "alwaysOn",
+              "data": {
+                "failCommands": [
+                  "hello",
+                  "isMaster"
+                ],
+                "appName": "reduceMaxTimeMSTest",
+                "blockConnection": true,
+                "blockTimeMS": 50
+              }
+            }
+          }
+        },
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "uriOptions": {
+                    "appName": "reduceMaxTimeMSTest",
+                    "w": 1,
+                    "timeoutMS": 500,
+                    "heartbeatFrequencyMS": 500
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "timeoutCollection",
+                  "database": "database",
+                  "collectionName": "timeoutColl"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 1
+            },
+            "timeoutMS": 100000
+          }
+        },
+        {
+          "name": "wait",
+          "object": "testRunner",
+          "arguments": {
+            "ms": 1000
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 2
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "timeoutColl"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "timeoutColl",
+                  "maxTimeMS": {
+                    "$$lte": 450
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "command is not sent if RTT is greater than timeoutMS",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": "alwaysOn",
+              "data": {
+                "failCommands": [
+                  "hello",
+                  "isMaster"
+                ],
+                "appName": "rttTooHighTest",
+                "blockConnection": true,
+                "blockTimeMS": 50
+              }
+            }
+          }
+        },
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "uriOptions": {
+                    "appName": "rttTooHighTest",
+                    "w": 1,
+                    "timeoutMS": 10,
+                    "heartbeatFrequencyMS": 500
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "timeoutCollection",
+                  "database": "database",
+                  "collectionName": "timeoutColl"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 1
+            },
+            "timeoutMS": 100000
+          }
+        },
+        {
+          "name": "wait",
+          "object": "testRunner",
+          "arguments": {
+            "ms": 1000
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 2
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 3
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 4
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "timeoutColl"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "short-circuit is not enabled with only 1 RTT measurement",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": "alwaysOn",
+              "data": {
+                "failCommands": [
+                  "hello",
+                  "isMaster"
+                ],
+                "appName": "reduceMaxTimeMSTest",
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "uriOptions": {
+                    "appName": "reduceMaxTimeMSTest",
+                    "w": 1,
+                    "timeoutMS": 90,
+                    "heartbeatFrequencyMS": 100000
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "timeoutCollection",
+                  "database": "database",
+                  "collectionName": "timeoutColl"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 1
+            },
+            "timeoutMS": 100000
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "timeoutCollection",
+          "arguments": {
+            "document": {
+              "_id": 2
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "timeoutColl"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "timeoutColl",
+                  "maxTimeMS": {
+                    "$$lte": 450
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/convenient-transactions.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/convenient-transactions.json
new file mode 100644
index 00000000000..3868b3026c2
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/convenient-transactions.json
@@ -0,0 +1,209 @@
+{
+  "description": "timeoutMS behaves correctly for the withTransaction API",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 500
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    },
+    {
+      "session": {
+        "id": "session",
+        "client": "client"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "withTransaction raises a client-side error if timeoutMS is overridden inside the callback",
+      "operations": [
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "callback": [
+              {
+                "name": "insertOne",
+                "object": "collection",
+                "arguments": {
+                  "document": {
+                    "_id": 1
+                  },
+                  "session": "session",
+                  "timeoutMS": 100
+                },
+                "expectError": {
+                  "isClientError": true
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": []
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is not refreshed for each operation in the callback",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 300
+              }
+            }
+          }
+        },
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "callback": [
+              {
+                "name": "insertOne",
+                "object": "collection",
+                "arguments": {
+                  "document": {
+                    "_id": 1
+                  },
+                  "session": "session"
+                }
+              },
+              {
+                "name": "insertOne",
+                "object": "collection",
+                "arguments": {
+                  "document": {
+                    "_id": 2
+                  },
+                  "session": "session"
+                },
+                "expectError": {
+                  "isTimeoutError": true
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/cursors.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/cursors.json
new file mode 100644
index 00000000000..36949d75091
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/cursors.json
@@ -0,0 +1,113 @@
+{
+  "description": "tests for timeoutMS behavior that applies to all cursor types",
+  "schemaVersion": "1.0",
+  "createEntities": [
+    {
+      "client": {
+        "id": "client"
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "find errors if timeoutMode is set and timeoutMS is not",
+      "operations": [
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "cursorLifetime"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "collection aggregate errors if timeoutMode is set and timeoutMS is not",
+      "operations": [
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMode": "cursorLifetime"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "database aggregate errors if timeoutMode is set and timeoutMS is not",
+      "operations": [
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [],
+            "timeoutMode": "cursorLifetime"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "listCollections errors if timeoutMode is set and timeoutMS is not",
+      "operations": [
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "cursorLifetime"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "listIndexes errors if timeoutMode is set and timeoutMS is not",
+      "operations": [
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMode": "cursorLifetime"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/deprecated-options.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/deprecated-options.json
new file mode 100644
index 00000000000..2ecba25f0d3
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/deprecated-options.json
@@ -0,0 +1,7180 @@
+{
+  "description": "operations ignore deprecated timeout options if timeoutMS is set",
+  "comment": "Manually changed session to use defaultTimeoutMS when testing socket / maxCommit overrides",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.2",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "commitTransaction ignores socketTimeoutMS if timeoutMS is set",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 20
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "aggregate"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client",
+                  "sessionOptions": {
+                    "defaultTimeoutMS": 10000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "commitTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "session": "session"
+          }
+        },
+        {
+          "name": "commitTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "commitTransaction ignores wTimeoutMS if timeoutMS is set",
+      "comment": "Moved timeoutMS from commitTransaction to startTransaction manually, as commitTransaction does not support a timeoutMS option.",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "aggregate"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 10000
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "session": "session"
+          }
+        },
+        {
+          "name": "commitTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "commitTransaction ignores maxCommitTimeMS if timeoutMS is set",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "aggregate"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client",
+                  "sessionOptions": {
+                    "defaultTimeoutMS": 1000,
+                    "defaultTransactionOptions": {
+                      "maxCommitTimeMS": 5000
+                    }
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "session": "session"
+          }
+        },
+        {
+          "name": "commitTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "abortTransaction ignores socketTimeoutMS if timeoutMS is set",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 20
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "aggregate"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client",
+                  "sessionOptions": {
+                    "defaultTimeoutMS": 10000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "abortTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "session": "session"
+          }
+        },
+        {
+          "name": "abortTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "abortTransaction ignores wTimeoutMS if timeoutMS is set",
+      "comment": "Moved timeoutMS from abortTransaction to startTransaction manually, as abortTransaction does not support a timeoutMS option.",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "aggregate"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 10000
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "session": "session"
+          }
+        },
+        {
+          "name": "abortTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "withTransaction ignores socketTimeoutMS if timeoutMS is set",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 20
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "commitTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 10000,
+            "callback": [
+              {
+                "name": "countDocuments",
+                "object": "collection",
+                "arguments": {
+                  "filter": {},
+                  "session": "session"
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "withTransaction ignores wTimeoutMS if timeoutMS is set",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 10000,
+            "callback": [
+              {
+                "name": "countDocuments",
+                "object": "collection",
+                "arguments": {
+                  "filter": {},
+                  "session": "session"
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "withTransaction ignores maxCommitTimeMS if timeoutMS is set",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client",
+                  "sessionOptions": {
+                    "defaultTransactionOptions": {
+                      "maxCommitTimeMS": 5000
+                    }
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 1000,
+            "callback": [
+              {
+                "name": "countDocuments",
+                "object": "collection",
+                "arguments": {
+                  "filter": {},
+                  "session": "session"
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - listCollections on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "ping"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - runCommand on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "ping",
+                "databaseName": "test",
+                "command": {
+                  "ping": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000,
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000,
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "socketTimeoutMS": 1
+                  },
+                  "useMultipleMongoses": false
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 5
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "wTimeoutMS": 1
+                  },
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 100000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "writeConcern": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "bucket": {
+                  "id": "bucket",
+                  "database": "database"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "maxTimeMS": 5000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$lte": 1000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/error-transformations.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/error-transformations.json
new file mode 100644
index 00000000000..4889e39583a
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/error-transformations.json
@@ -0,0 +1,180 @@
+{
+  "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.0",
+      "topologies": [
+        "replicaset"
+      ]
+    },
+    {
+      "minServerVersion": "4.2",
+      "topologies": [
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 250
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "basic MaxTimeMSExpired error is transformed",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "errorCode": 50
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "_id": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "write concern error MaxTimeMSExpired is transformed",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "writeConcernError": {
+                  "code": 50,
+                  "errmsg": "maxTimeMS expired"
+                }
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "_id": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/global-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/global-timeoutMS.json
new file mode 100644
index 00000000000..740bbad2e2c
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/global-timeoutMS.json
@@ -0,0 +1,5830 @@
+{
+  "description": "timeoutMS can be configured on a MongoClient",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be configured on a MongoClient - listDatabases on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - listDatabases on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - createChangeStream on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - aggregate on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - listCollections on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - listCollections on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - runCommand on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "ping"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "ping",
+                "databaseName": "test",
+                "command": {
+                  "ping": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - runCommand on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "ping"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "ping",
+                "databaseName": "test",
+                "command": {
+                  "ping": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - createChangeStream on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "name": "x_1"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoClient - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 250
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 350
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "uriOptions": {
+                    "timeoutMS": 0
+                  },
+                  "useMultipleMongoses": false,
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "ignoreCommandMonitoringEvents": [
+                    "killCursors"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-advanced.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-advanced.json
new file mode 100644
index 00000000000..c6c0944d2f4
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-advanced.json
@@ -0,0 +1,385 @@
+{
+  "description": "timeoutMS behaves correctly for advanced GridFS API operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 75
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "bucket": {
+        "id": "bucket",
+        "database": "database"
+      }
+    },
+    {
+      "collection": {
+        "id": "filesCollection",
+        "database": "database",
+        "collectionName": "fs.files"
+      }
+    },
+    {
+      "collection": {
+        "id": "chunksCollection",
+        "database": "database",
+        "collectionName": "fs.chunks"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "fs.files",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "length": 8,
+          "chunkSize": 4,
+          "uploadDate": {
+            "$date": "1970-01-01T00:00:00.000Z"
+          },
+          "filename": "length-8",
+          "contentType": "application/octet-stream",
+          "aliases": [],
+          "metadata": {}
+        }
+      ]
+    },
+    {
+      "collectionName": "fs.chunks",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "files_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "n": 0,
+          "data": {
+            "$binary": {
+              "base64": "ESIzRA==",
+              "subType": "00"
+            }
+          }
+        },
+        {
+          "_id": {
+            "$oid": "000000000000000000000006"
+          },
+          "files_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "n": 1,
+          "data": {
+            "$binary": {
+              "base64": "ESIzRA==",
+              "subType": "00"
+            }
+          }
+        }
+      ]
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be overridden for a rename",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "rename",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            },
+            "newFilename": "foo",
+            "timeoutMS": 2000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to update during a rename",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "rename",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            },
+            "newFilename": "foo"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be overridden for drop",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "drop"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "drop",
+          "object": "bucket",
+          "arguments": {
+            "timeoutMS": 2000
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to files collection drop",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "drop"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "drop",
+          "object": "bucket",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "drop",
+                "databaseName": "test",
+                "command": {
+                  "drop": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to chunks collection drop",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "skip": 1
+              },
+              "data": {
+                "failCommands": [
+                  "drop"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "drop",
+          "object": "bucket",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to drop as a whole, not individual parts",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "drop"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 50
+              }
+            }
+          }
+        },
+        {
+          "name": "drop",
+          "object": "bucket",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-delete.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-delete.json
new file mode 100644
index 00000000000..9f4980114be
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-delete.json
@@ -0,0 +1,285 @@
+{
+  "description": "timeoutMS behaves correctly for GridFS delete operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 75
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "bucket": {
+        "id": "bucket",
+        "database": "database"
+      }
+    },
+    {
+      "collection": {
+        "id": "filesCollection",
+        "database": "database",
+        "collectionName": "fs.files"
+      }
+    },
+    {
+      "collection": {
+        "id": "chunksCollection",
+        "database": "database",
+        "collectionName": "fs.chunks"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "fs.files",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "length": 8,
+          "chunkSize": 4,
+          "uploadDate": {
+            "$date": "1970-01-01T00:00:00.000Z"
+          },
+          "filename": "length-8",
+          "contentType": "application/octet-stream",
+          "aliases": [],
+          "metadata": {}
+        }
+      ]
+    },
+    {
+      "collectionName": "fs.chunks",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "files_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "n": 0,
+          "data": {
+            "$binary": {
+              "base64": "ESIzRA==",
+              "subType": "00"
+            }
+          }
+        },
+        {
+          "_id": {
+            "$oid": "000000000000000000000006"
+          },
+          "files_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "n": 1,
+          "data": {
+            "$binary": {
+              "base64": "ESIzRA==",
+              "subType": "00"
+            }
+          }
+        }
+      ]
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be overridden for delete",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "delete",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            },
+            "timeoutMS": 1000
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to delete against the files collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "delete",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to delete against the chunks collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "skip": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "delete",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to entire delete, not individual parts",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 50
+              }
+            }
+          }
+        },
+        {
+          "name": "delete",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-download.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-download.json
new file mode 100644
index 00000000000..fb0b582706c
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-download.json
@@ -0,0 +1,360 @@
+{
+  "description": "timeoutMS behaves correctly for GridFS download operations",
+  "comment": "Manually increased timeouts to reduce races",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 200
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "bucket": {
+        "id": "bucket",
+        "database": "database"
+      }
+    },
+    {
+      "collection": {
+        "id": "filesCollection",
+        "database": "database",
+        "collectionName": "fs.files"
+      }
+    },
+    {
+      "collection": {
+        "id": "chunksCollection",
+        "database": "database",
+        "collectionName": "fs.chunks"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "fs.files",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "length": 8,
+          "chunkSize": 4,
+          "uploadDate": {
+            "$date": "1970-01-01T00:00:00.000Z"
+          },
+          "filename": "length-8",
+          "contentType": "application/octet-stream",
+          "aliases": [],
+          "metadata": {}
+        }
+      ]
+    },
+    {
+      "collectionName": "fs.chunks",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "files_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "n": 0,
+          "data": {
+            "$binary": {
+              "base64": "ESIzRA==",
+              "subType": "00"
+            }
+          }
+        },
+        {
+          "_id": {
+            "$oid": "000000000000000000000006"
+          },
+          "files_id": {
+            "$oid": "000000000000000000000005"
+          },
+          "n": 1,
+          "data": {
+            "$binary": {
+              "base64": "ESIzRA==",
+              "subType": "00"
+            }
+          }
+        }
+      ]
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be overridden for download",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "download",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            },
+            "timeoutMS": 1000
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to find to get files document",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "download",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to find to get chunks",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "skip": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "download",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "fs.chunks",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to entire download, not individual parts",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "download",
+          "object": "bucket",
+          "arguments": {
+            "id": {
+              "$oid": "000000000000000000000005"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "fs.chunks",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-find.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-find.json
new file mode 100644
index 00000000000..74090362844
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-find.json
@@ -0,0 +1,183 @@
+{
+  "description": "timeoutMS behaves correctly for GridFS find operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 75
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "bucket": {
+        "id": "bucket",
+        "database": "database"
+      }
+    },
+    {
+      "collection": {
+        "id": "filesCollection",
+        "database": "database",
+        "collectionName": "fs.files"
+      }
+    },
+    {
+      "collection": {
+        "id": "chunksCollection",
+        "database": "database",
+        "collectionName": "fs.chunks"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "fs.files",
+      "databaseName": "test",
+      "documents": []
+    },
+    {
+      "collectionName": "fs.chunks",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be overridden for a find",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "bucket",
+          "arguments": {
+            "filter": {},
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to find command",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "bucket",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "fs.files",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-upload.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-upload.json
new file mode 100644
index 00000000000..b3f174973de
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/gridfs-upload.json
@@ -0,0 +1,409 @@
+{
+  "description": "timeoutMS behaves correctly for GridFS upload operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "serverless": "forbid"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 75
+        },
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "bucket": {
+        "id": "bucket",
+        "database": "database"
+      }
+    },
+    {
+      "collection": {
+        "id": "filesCollection",
+        "database": "database",
+        "collectionName": "fs.files"
+      }
+    },
+    {
+      "collection": {
+        "id": "chunksCollection",
+        "database": "database",
+        "collectionName": "fs.chunks"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "fs.files",
+      "databaseName": "test",
+      "documents": []
+    },
+    {
+      "collectionName": "fs.chunks",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be overridden for upload",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            },
+            "timeoutMS": 1000
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to initial find on files collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to listIndexes on files collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to index creation for files collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to listIndexes on chunks collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "skip": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to index creation for chunks collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "skip": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to chunk insertion",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to creation of files document",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "skip": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 100
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to upload as a whole, not individual parts",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 50
+              }
+            }
+          }
+        },
+        {
+          "name": "upload",
+          "object": "bucket",
+          "arguments": {
+            "filename": "filename",
+            "source": {
+              "$$hexBytes": "1122334455"
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/legacy-timeouts.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/legacy-timeouts.json
new file mode 100644
index 00000000000..535425c934a
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/legacy-timeouts.json
@@ -0,0 +1,379 @@
+{
+  "description": "legacy timeouts continue to work if timeoutMS is not set",
+  "schemaVersion": "1.0",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "socketTimeoutMS is not used to derive a maxTimeMS command field",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "uriOptions": {
+                    "socketTimeoutMS": 50000
+                  }
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "waitQueueTimeoutMS is not used to derive a maxTimeMS command field",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "uriOptions": {
+                    "waitQueueTimeoutMS": 50000
+                  }
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "wTimeoutMS is not used to derive a maxTimeMS command field",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ],
+                  "uriOptions": {
+                    "wTimeoutMS": 50000
+                  }
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  },
+                  "writeConcern": {
+                    "wtimeout": 50000
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxTimeMS option is used directly as the maxTimeMS field on a command",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "maxTimeMS": 50000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": 50000
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command",
+      "runOnRequirements": [
+        {
+          "topologies": [
+            "replicaset",
+            "sharded"
+          ]
+        }
+      ],
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "client": {
+                  "id": "client",
+                  "observeEvents": [
+                    "commandStartedEvent"
+                  ]
+                }
+              },
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test"
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              },
+              {
+                "session": {
+                  "id": "session",
+                  "client": "client",
+                  "sessionOptions": {
+                    "defaultTransactionOptions": {
+                      "maxCommitTimeMS": 1000
+                    }
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "_id": 1
+            },
+            "session": "session"
+          }
+        },
+        {
+          "name": "commitTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "maxTimeMS": 1000
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/non-tailable-cursors.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/non-tailable-cursors.json
new file mode 100644
index 00000000000..dd22ac3996f
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/non-tailable-cursors.json
@@ -0,0 +1,542 @@
+{
+  "description": "timeoutMS behaves correctly for non-tailable cursors",
+  "comment": "Manually reduced blockTimeMS for tests to pass in serverless",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 200
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": [
+        {
+          "_id": 0
+        },
+        {
+          "_id": 1
+        },
+        {
+          "_id": 2
+        }
+      ]
+    },
+    {
+      "collectionName": "aggregateOutputColl",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS applied to find if timeoutMode is cursor_lifetime",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "cursorLifetime"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "remaining timeoutMS applied to getMore if timeoutMode is unset",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 101
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMS": 200,
+            "batchSize": 2
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 101
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "cursorLifetime",
+            "timeoutMS": 200,
+            "batchSize": 2
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to find if timeoutMode is iteration",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "iteration"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - success",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 101
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "iteration",
+            "timeoutMS": 200,
+            "batchSize": 2
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "iteration",
+            "batchSize": 2
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "aggregate with $out errors if timeoutMode is iteration",
+      "operations": [
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [
+              {
+                "$out": "aggregateOutputColl"
+              }
+            ],
+            "timeoutMS": 100,
+            "timeoutMode": "iteration"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": []
+        }
+      ]
+    },
+    {
+      "description": "aggregate with $merge errors if timeoutMode is iteration",
+      "operations": [
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": [
+              {
+                "$merge": "aggregateOutputColl"
+              }
+            ],
+            "timeoutMS": 100,
+            "timeoutMode": "iteration"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": []
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-collection-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-collection-timeoutMS.json
new file mode 100644
index 00000000000..d17e22fc2f4
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-collection-timeoutMS.json
@@ -0,0 +1,3498 @@
+{
+  "description": "timeoutMS can be overridden for a MongoCollection",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 10
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoCollection - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll",
+                  "collectionOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-database-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-database-timeoutMS.json
new file mode 100644
index 00000000000..f7fa642c582
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-database-timeoutMS.json
@@ -0,0 +1,4622 @@
+{
+  "description": "timeoutMS can be overridden for a MongoDatabase",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 10
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - aggregate on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - listCollections on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - runCommand on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "ping"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "ping",
+                "databaseName": "test",
+                "command": {
+                  "ping": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "ping"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "ping",
+                "databaseName": "test",
+                "command": {
+                  "ping": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - count on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - find on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - createIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "name": "x_1"
+          },
+          "expectError": {
+            "isClientError": false,
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 1000
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "createEntities",
+          "object": "testRunner",
+          "arguments": {
+            "entities": [
+              {
+                "database": {
+                  "id": "database",
+                  "client": "client",
+                  "databaseName": "test",
+                  "databaseOptions": {
+                    "timeoutMS": 0
+                  }
+                }
+              },
+              {
+                "collection": {
+                  "id": "collection",
+                  "database": "database",
+                  "collectionName": "coll"
+                }
+              }
+            ]
+          }
+        },
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-operation-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-operation-timeoutMS.json
new file mode 100644
index 00000000000..6fa0bd802a6
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/override-operation-timeoutMS.json
@@ -0,0 +1,3577 @@
+{
+  "description": "timeoutMS can be overridden for an operation",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 10
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be configured for an operation - listDatabases on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - listDatabases on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - createChangeStream on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - createChangeStream on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - aggregate on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - aggregate on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - listCollections on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - listCollections on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - runCommand on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "ping"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "ping",
+                "databaseName": "test",
+                "command": {
+                  "ping": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - runCommand on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "ping"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "runCommand",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "command": {
+              "ping": 1
+            },
+            "commandName": "ping"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "ping",
+                "databaseName": "test",
+                "command": {
+                  "ping": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - createChangeStream on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - createChangeStream on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - aggregate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - aggregate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - count on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - count on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - countDocuments on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - countDocuments on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - distinct on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - distinct on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - find on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - find on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - findOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - findOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - listIndexes on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - listIndexes on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - listIndexNames on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexNames",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - insertOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - insertOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - insertMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - insertMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - deleteOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - deleteOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - deleteMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - deleteMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - replaceOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - replaceOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - updateOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - updateOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - updateMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - updateMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "updateMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - createIndex on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - createIndex on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "createIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "createIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "keys": {
+              "x": 1
+            },
+            "name": "x_1"
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "createIndexes",
+                "databaseName": "test",
+                "command": {
+                  "createIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - dropIndex on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "name": "x_1"
+          },
+          "expectError": {
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - dropIndex on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndex",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "name": "x_1"
+          },
+          "expectError": {
+            "isTimeoutError": false
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be configured for an operation - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be set to 0 for an operation - dropIndexes on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "dropIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 15
+              }
+            }
+          }
+        },
+        {
+          "name": "dropIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "dropIndexes",
+                "databaseName": "test",
+                "command": {
+                  "dropIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-legacy-timeouts.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-legacy-timeouts.json
new file mode 100644
index 00000000000..aded781aeed
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-legacy-timeouts.json
@@ -0,0 +1,3042 @@
+{
+  "description": "legacy timeouts behave correctly for retryable operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "socketTimeoutMS": 100
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "operation succeeds after one socket timeout - insertOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - insertOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - insertMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - insertMany on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - deleteOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - deleteOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - replaceOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - replaceOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - updateOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - updateOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - findOneAndDelete on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - findOneAndReplace on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - bulkWrite on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - listDatabases on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - listDatabases on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - listDatabaseNames on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - createChangeStream on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - createChangeStream on client",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - aggregate on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - aggregate on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - listCollections on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - listCollections on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - listCollectionNames on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - createChangeStream on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - createChangeStream on database",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - aggregate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - aggregate on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - count on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - count on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - countDocuments on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - countDocuments on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - distinct on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - distinct on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - find on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - find on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - findOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - findOne on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - listIndexes on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - listIndexes on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation succeeds after one socket timeout - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation fails after two consecutive socket timeouts - createChangeStream on collection",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 125
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-timeoutMS.json
new file mode 100644
index 00000000000..9daad260ef3
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/retryability-timeoutMS.json
@@ -0,0 +1,5688 @@
+{
+  "description": "timeoutMS behaves correctly for retryable operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.0",
+      "topologies": [
+        "replicaset"
+      ]
+    },
+    {
+      "minServerVersion": "4.2",
+      "topologies": [
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 100
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - insertOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "document": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "document": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - insertMany on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "insertMany",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "documents": [
+              {
+                "x": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "delete"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "deleteOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "delete",
+                "databaseName": "test",
+                "command": {
+                  "delete": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "replaceOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - updateOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "update"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "updateOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "update",
+                "databaseName": "test",
+                "command": {
+                  "update": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndDelete",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndReplace",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "replacement": {
+              "x": 1
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "findAndModify"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOneAndUpdate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {},
+            "update": {
+              "$set": {
+                "x": 1
+              }
+            }
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "findAndModify",
+                "databaseName": "test",
+                "command": {
+                  "findAndModify": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "bulkWrite",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "requests": [
+              {
+                "insertOne": {
+                  "document": {
+                    "_id": 1
+                  }
+                }
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - listDatabases on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabases",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listDatabases"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listDatabaseNames",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listDatabases",
+                "databaseName": "admin",
+                "command": {
+                  "listDatabases": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "client",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "admin",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": [
+              {
+                "$listLocalSessions": {}
+              },
+              {
+                "$limit": 1
+              }
+            ]
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - listCollections on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollections",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listCollections"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listCollectionNames",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listCollections",
+                "databaseName": "test",
+                "command": {
+                  "listCollections": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "database",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": 1,
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "aggregate",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - count on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - count on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - count on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "count",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "countDocuments",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "count"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "estimatedDocumentCount",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "count",
+                "databaseName": "test",
+                "command": {
+                  "count": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - distinct on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "fieldName": "x",
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "distinct"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "distinct",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "fieldName": "x",
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "distinct",
+                "databaseName": "test",
+                "command": {
+                  "distinct": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - find on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - find on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - find on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - findOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "filter": {}
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "findOne",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "filter": {}
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "listIndexes"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "listIndexes",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "listIndexes",
+                "databaseName": "test",
+                "command": {
+                  "listIndexes": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.4"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 4
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60,
+                "errorCode": 7,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "pipeline": []
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 1000,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "4.3.1"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "aggregate"
+                ],
+                "errorCode": 7,
+                "closeConnection": false,
+                "errorLabels": [
+                  "RetryableWriteError"
+                ]
+              }
+            }
+          }
+        },
+        {
+          "name": "createChangeStream",
+          "object": "collection",
+          "arguments": {
+            "timeoutMS": 0,
+            "pipeline": []
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "aggregate",
+                "databaseName": "test",
+                "command": {
+                  "aggregate": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/runCursorCommand.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/runCursorCommand.json
new file mode 100644
index 00000000000..5fc0be33997
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/runCursorCommand.json
@@ -0,0 +1,583 @@
+{
+  "description": "runCursorCommand",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "commandClient",
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent",
+          "commandSucceededEvent"
+        ]
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "ignoreCommandMonitoringEvents": [
+          "killCursors"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "commandDb",
+        "client": "commandClient",
+        "databaseName": "commandDb"
+      }
+    },
+    {
+      "database": {
+        "id": "db",
+        "client": "client",
+        "databaseName": "db"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "db",
+        "collectionName": "collection"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "collection",
+      "databaseName": "db",
+      "documents": [
+        {
+          "_id": 1,
+          "x": 11
+        },
+        {
+          "_id": 2,
+          "x": 22
+        },
+        {
+          "_id": 3,
+          "x": 33
+        },
+        {
+          "_id": 4,
+          "x": 44
+        },
+        {
+          "_id": 5,
+          "x": 55
+        }
+      ]
+    }
+  ],
+  "tests": [
+    {
+      "description": "errors if timeoutMode is set without timeoutMS",
+      "operations": [
+        {
+          "name": "runCursorCommand",
+          "object": "db",
+          "arguments": {
+            "commandName": "find",
+            "command": {
+              "find": "collection"
+            },
+            "timeoutMode": "cursorLifetime"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "errors if timeoutMode is cursorLifetime and cursorType is tailableAwait",
+      "operations": [
+        {
+          "name": "runCursorCommand",
+          "object": "db",
+          "arguments": {
+            "commandName": "find",
+            "command": {
+              "find": "collection"
+            },
+            "timeoutMode": "cursorLifetime",
+            "cursorType": "tailableAwait"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset",
+      "runOnRequirements": [
+        {
+          "serverless": "forbid"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60
+              }
+            }
+          }
+        },
+        {
+          "name": "runCursorCommand",
+          "object": "db",
+          "arguments": {
+            "commandName": "find",
+            "timeoutMS": 100,
+            "command": {
+              "find": "collection",
+              "batchSize": 2
+            }
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "command": {
+                  "find": "collection",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "collection",
+                  "maxTimeMS": {
+                    "$$exists": true
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "Non-tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure",
+      "runOnRequirements": [
+        {
+          "serverless": "forbid"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60
+              }
+            }
+          }
+        },
+        {
+          "name": "runCursorCommand",
+          "object": "db",
+          "arguments": {
+            "commandName": "find",
+            "command": {
+              "find": "collection",
+              "batchSize": 2
+            },
+            "timeoutMode": "iteration",
+            "timeoutMS": 100,
+            "batchSize": 2
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "db",
+                "command": {
+                  "find": "collection",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "db",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "collection",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "Tailable cursor iteration timeoutMS is refreshed for getMore - failure",
+      "runOnRequirements": [
+        {
+          "serverless": "forbid"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60
+              }
+            }
+          }
+        },
+        {
+          "name": "dropCollection",
+          "object": "db",
+          "arguments": {
+            "collection": "cappedCollection"
+          }
+        },
+        {
+          "name": "createCollection",
+          "object": "db",
+          "arguments": {
+            "collection": "cappedCollection",
+            "capped": true,
+            "size": 4096,
+            "max": 3
+          },
+          "saveResultAsEntity": "cappedCollection"
+        },
+        {
+          "name": "insertMany",
+          "object": "cappedCollection",
+          "arguments": {
+            "documents": [
+              {
+                "_id": 1,
+                "x": 11
+              },
+              {
+                "_id": 2,
+                "x": 22
+              }
+            ]
+          }
+        },
+        {
+          "name": "createCommandCursor",
+          "object": "db",
+          "arguments": {
+            "commandName": "find",
+            "command": {
+              "find": "cappedCollection",
+              "batchSize": 1,
+              "tailable": true
+            },
+            "timeoutMode": "iteration",
+            "timeoutMS": 100,
+            "batchSize": 1,
+            "cursorType": "tailable"
+          },
+          "saveResultAsEntity": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "drop"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "create"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "db",
+                "command": {
+                  "find": "cappedCollection",
+                  "tailable": true,
+                  "awaitData": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "db",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "cappedCollection",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure",
+      "runOnRequirements": [
+        {
+          "serverless": "forbid"
+        }
+      ],
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 60
+              }
+            }
+          }
+        },
+        {
+          "name": "dropCollection",
+          "object": "db",
+          "arguments": {
+            "collection": "cappedCollection"
+          }
+        },
+        {
+          "name": "createCollection",
+          "object": "db",
+          "arguments": {
+            "collection": "cappedCollection",
+            "capped": true,
+            "size": 4096,
+            "max": 3
+          },
+          "saveResultAsEntity": "cappedCollection"
+        },
+        {
+          "name": "insertMany",
+          "object": "cappedCollection",
+          "arguments": {
+            "documents": [
+              {
+                "foo": "bar"
+              },
+              {
+                "fizz": "buzz"
+              }
+            ]
+          }
+        },
+        {
+          "name": "createCommandCursor",
+          "object": "db",
+          "arguments": {
+            "command": {
+              "find": "cappedCollection",
+              "tailable": true,
+              "awaitData": true
+            },
+            "cursorType": "tailableAwait",
+            "batchSize": 1
+          },
+          "saveResultAsEntity": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "drop"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "create"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "db",
+                "command": {
+                  "find": "cappedCollection",
+                  "tailable": true,
+                  "awaitData": true,
+                  "maxTimeMS": {
+                    "$$exists": true
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "db",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "cappedCollection"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-inherit-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-inherit-timeoutMS.json
new file mode 100644
index 00000000000..13ea91c7948
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-inherit-timeoutMS.json
@@ -0,0 +1,331 @@
+{
+  "description": "sessions inherit timeoutMS from their parent MongoClient",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 500
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent",
+          "commandSucceededEvent",
+          "commandFailedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    },
+    {
+      "session": {
+        "id": "session",
+        "client": "client"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS applied to commitTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "commitTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "session": "session",
+            "document": {
+              "_id": 1
+            }
+          }
+        },
+        {
+          "name": "commitTransaction",
+          "object": "session",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "commitTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to abortTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "abortTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "session": "session",
+            "document": {
+              "_id": 1
+            }
+          }
+        },
+        {
+          "name": "abortTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "abortTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to withTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "callback": [
+              {
+                "name": "insertOne",
+                "object": "collection",
+                "arguments": {
+                  "session": "session",
+                  "document": {
+                    "_id": 1
+                  }
+                },
+                "expectError": {
+                  "isTimeoutError": true
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "abortTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-operation-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-operation-timeoutMS.json
new file mode 100644
index 00000000000..441c698328c
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-operation-timeoutMS.json
@@ -0,0 +1,335 @@
+{
+  "description": "timeoutMS can be overridden for individual session operations",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent",
+          "commandSucceededEvent",
+          "commandFailedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    },
+    {
+      "session": {
+        "id": "session",
+        "client": "client"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS can be overridden for commitTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "commitTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "session": "session",
+            "document": {
+              "_id": 1
+            }
+          }
+        },
+        {
+          "name": "commitTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 500
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "commitTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be overridden for abortTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "abortTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "session": "session",
+            "document": {
+              "_id": 1
+            }
+          }
+        },
+        {
+          "name": "abortTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 500
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "abortTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS can be overridden for withTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "timeoutMS": 500,
+            "callback": [
+              {
+                "name": "insertOne",
+                "object": "collection",
+                "arguments": {
+                  "session": "session",
+                  "document": {
+                    "_id": 1
+                  }
+                },
+                "expectError": {
+                  "isTimeoutError": true
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "abortTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-timeoutMS.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-timeoutMS.json
new file mode 100644
index 00000000000..d90152e909c
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/sessions-override-timeoutMS.json
@@ -0,0 +1,331 @@
+{
+  "description": "timeoutMS can be overridden at the level of a ClientSession",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4",
+      "topologies": [
+        "replicaset",
+        "sharded"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent",
+          "commandSucceededEvent",
+          "commandFailedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    },
+    {
+      "session": {
+        "id": "session",
+        "client": "client",
+        "sessionOptions": {
+          "defaultTimeoutMS": 500
+        }
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "documents": []
+    }
+  ],
+  "tests": [
+    {
+      "description": "timeoutMS applied to commitTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "commitTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "session": "session",
+            "document": {
+              "_id": 1
+            }
+          }
+        },
+        {
+          "name": "commitTransaction",
+          "object": "session",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "commitTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "commitTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "commitTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to abortTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "abortTransaction"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "startTransaction",
+          "object": "session"
+        },
+        {
+          "name": "insertOne",
+          "object": "collection",
+          "arguments": {
+            "session": "session",
+            "document": {
+              "_id": 1
+            }
+          }
+        },
+        {
+          "name": "abortTransaction",
+          "object": "session"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll"
+                }
+              }
+            },
+            {
+              "commandSucceededEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "abortTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to withTransaction",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "insert"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 600
+              }
+            }
+          }
+        },
+        {
+          "name": "withTransaction",
+          "object": "session",
+          "arguments": {
+            "callback": [
+              {
+                "name": "insertOne",
+                "object": "collection",
+                "arguments": {
+                  "session": "session",
+                  "document": {
+                    "_id": 1
+                  }
+                },
+                "expectError": {
+                  "isTimeoutError": true
+                }
+              }
+            ]
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "insert",
+                "databaseName": "test",
+                "command": {
+                  "insert": "coll",
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "insert"
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "abortTransaction",
+                "databaseName": "admin",
+                "command": {
+                  "abortTransaction": 1,
+                  "maxTimeMS": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  }
+                }
+              }
+            },
+            {
+              "commandFailedEvent": {
+                "commandName": "abortTransaction"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-awaitData.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-awaitData.json
new file mode 100644
index 00000000000..d0fe950dd8e
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-awaitData.json
@@ -0,0 +1,424 @@
+{
+  "description": "timeoutMS behaves correctly for tailable awaitData cursors",
+  "comment": "Manually changed: timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set. Added ignoreExtra events, sometimes an extra getMore is called.",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 200
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "createOptions": {
+        "capped": true,
+        "size": 500
+      },
+      "documents": [
+        {
+          "_id": 0
+        },
+        {
+          "_id": 1
+        }
+      ]
+    }
+  ],
+  "tests": [
+    {
+      "description": "error if timeoutMode is cursor_lifetime",
+      "operations": [
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "cursorLifetime",
+            "cursorType": "tailableAwait"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "error if maxAwaitTimeMS is greater than timeoutMS",
+      "operations": [
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailableAwait",
+            "timeoutMS": 5,
+            "maxAwaitTimeMS": 10
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "error if maxAwaitTimeMS is equal to timeoutMS",
+      "operations": [
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailableAwait",
+            "timeoutMS": 5,
+            "maxAwaitTimeMS": 5
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to find",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 300
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailableAwait"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "tailable": true,
+                  "awaitData": true,
+                  "maxTimeMS": {
+                    "$$exists": true
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 150
+              }
+            }
+          }
+        },
+        {
+          "name": "createFindCursor",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailableAwait",
+            "timeoutMS": 250,
+            "batchSize": 1
+          },
+          "saveResultAsEntity": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "tailable": true,
+                  "awaitData": true,
+                  "maxTimeMS": {
+                    "$$exists": true
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 150
+              }
+            }
+          }
+        },
+        {
+          "name": "createFindCursor",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailableAwait",
+            "timeoutMS": 250,
+            "batchSize": 1,
+            "maxAwaitTimeMS": 1
+          },
+          "saveResultAsEntity": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "ignoreExtraEvents": true,
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "tailable": true,
+                  "awaitData": true,
+                  "maxTimeMS": {
+                    "$$exists": true
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": 1
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore - failure",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "createFindCursor",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailableAwait",
+            "batchSize": 1
+          },
+          "saveResultAsEntity": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "tailable": true,
+                  "awaitData": true,
+                  "maxTimeMS": {
+                    "$$exists": true
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll"
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-non-awaitData.json b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-non-awaitData.json
new file mode 100644
index 00000000000..e88230e4f7a
--- /dev/null
+++ b/driver-core/src/test/resources/unified-test-format/client-side-operation-timeout/tailable-non-awaitData.json
@@ -0,0 +1,312 @@
+{
+  "description": "timeoutMS behaves correctly for tailable non-awaitData cursors",
+  "schemaVersion": "1.9",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "4.4"
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "failPointClient",
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "client": {
+        "id": "client",
+        "uriOptions": {
+          "timeoutMS": 200
+        },
+        "useMultipleMongoses": false,
+        "observeEvents": [
+          "commandStartedEvent"
+        ]
+      }
+    },
+    {
+      "database": {
+        "id": "database",
+        "client": "client",
+        "databaseName": "test"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection",
+        "database": "database",
+        "collectionName": "coll"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll",
+      "databaseName": "test",
+      "createOptions": {
+        "capped": true,
+        "size": 500
+      },
+      "documents": [
+        {
+          "_id": 0
+        },
+        {
+          "_id": 1
+        }
+      ]
+    }
+  ],
+  "tests": [
+    {
+      "description": "error if timeoutMode is cursor_lifetime",
+      "operations": [
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "timeoutMode": "cursorLifetime",
+            "cursorType": "tailable"
+          },
+          "expectError": {
+            "isClientError": true
+          }
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS applied to find",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "find"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "find",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailable"
+          },
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "tailable": true,
+                  "awaitData": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore - success",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 2
+              },
+              "data": {
+                "failCommands": [
+                  "find",
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 150
+              }
+            }
+          }
+        },
+        {
+          "name": "createFindCursor",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailable",
+            "timeoutMS": 200,
+            "batchSize": 1
+          },
+          "saveResultAsEntity": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "tailable": true,
+                  "awaitData": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "description": "timeoutMS is refreshed for getMore - failure",
+      "operations": [
+        {
+          "name": "failPoint",
+          "object": "testRunner",
+          "arguments": {
+            "client": "failPointClient",
+            "failPoint": {
+              "configureFailPoint": "failCommand",
+              "mode": {
+                "times": 1
+              },
+              "data": {
+                "failCommands": [
+                  "getMore"
+                ],
+                "blockConnection": true,
+                "blockTimeMS": 250
+              }
+            }
+          }
+        },
+        {
+          "name": "createFindCursor",
+          "object": "collection",
+          "arguments": {
+            "filter": {},
+            "cursorType": "tailable",
+            "batchSize": 1
+          },
+          "saveResultAsEntity": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor"
+        },
+        {
+          "name": "iterateUntilDocumentOrError",
+          "object": "tailableCursor",
+          "expectError": {
+            "isTimeoutError": true
+          }
+        }
+      ],
+      "expectEvents": [
+        {
+          "client": "client",
+          "events": [
+            {
+              "commandStartedEvent": {
+                "commandName": "find",
+                "databaseName": "test",
+                "command": {
+                  "find": "coll",
+                  "tailable": true,
+                  "awaitData": {
+                    "$$exists": false
+                  },
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            },
+            {
+              "commandStartedEvent": {
+                "commandName": "getMore",
+                "databaseName": "test",
+                "command": {
+                  "getMore": {
+                    "$$type": [
+                      "int",
+                      "long"
+                    ]
+                  },
+                  "collection": "coll",
+                  "maxTimeMS": {
+                    "$$exists": false
+                  }
+                }
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy
new file mode 100644
index 00000000000..43deb3bd42c
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb
+
+import spock.lang.Specification
+
+import javax.net.ssl.SSLContext
+import java.util.concurrent.TimeUnit
+import java.util.function.Supplier
+
+class ClientEncryptionSettingsSpecification extends Specification {
+
+    def 'should return the configured values and defaults'() {
+        given:
+        def mongoClientSettings = MongoClientSettings.builder().build()
+        def keyVaultNamespace = "keyVaultNamespace"
+        def kmsProvider = ["provider": ["test" : "test"]]
+        def kmsProviderSupplier = ["provider": { ["test" : "test"] } as Supplier]
+        def kmsProviderSslContextMap = ["provider": SSLContext.getDefault()]
+
+        when:
+        def options = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(mongoClientSettings)
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProvider)
+                .build()
+
+        then:
+        options.getKeyVaultMongoClientSettings() == mongoClientSettings
+        options.getKeyVaultNamespace() == keyVaultNamespace
+        options.getKmsProviders() == kmsProvider
+        options.getKmsProviderPropertySuppliers() == [:]
+        options.getKmsProviderSslContextMap() == [:]
+        options.getTimeout(TimeUnit.MILLISECONDS) == null
+
+        when:
+        options = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(mongoClientSettings)
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProvider)
+                .kmsProviderPropertySuppliers(kmsProviderSupplier)
+                .kmsProviderSslContextMap(kmsProviderSslContextMap)
+                .timeout(1_000, TimeUnit.MILLISECONDS)
+                .build()
+
+        then:
+        options.getKeyVaultMongoClientSettings() == mongoClientSettings
+        options.getKeyVaultNamespace() == keyVaultNamespace
+        options.getKmsProviders() == kmsProvider
+        options.getKmsProviderPropertySuppliers() == kmsProviderSupplier
+        options.getKmsProviderSslContextMap() == kmsProviderSslContextMap
+        options.getTimeout(TimeUnit.MILLISECONDS) == 1_000
+    }
+
+    def 'should throw an exception if the timeout is invalid'() {
+        given:
+        def builder = ClientEncryptionSettings.builder()
+
+        when:
+        builder.timeout(500, TimeUnit.NANOSECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.timeout(-1, TimeUnit.SECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+}
diff --git a/driver-legacy/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy
similarity index 83%
rename from driver-legacy/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy
rename to driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy
index d48199f7b12..98bf163f9e3 100644
--- a/driver-legacy/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy
@@ -18,6 +18,8 @@ package com.mongodb
 
 import spock.lang.Specification
 
+import java.util.concurrent.TimeUnit
+
 class ClientSessionOptionsSpecification extends Specification {
 
     def 'should have correct defaults'() {
@@ -45,6 +47,23 @@ class ClientSessionOptionsSpecification extends Specification {
         transactionOptions << [TransactionOptions.builder().build(), TransactionOptions.builder().readConcern(ReadConcern.LOCAL).build()]
     }
 
+    def 'should throw an exception if the defaultTimeout is set and negative'() {
+        given:
+        def builder = ClientSessionOptions.builder()
+
+        when:
+        builder.defaultTimeout(500, TimeUnit.NANOSECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.defaultTimeout(-1, TimeUnit.SECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
     def 'should apply options to builder'() {
         expect:
         ClientSessionOptions.builder(baseOptions).build() == baseOptions
diff --git a/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy
index 90f28833ba5..ec5d92b1e49 100644
--- a/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy
@@ -50,7 +50,7 @@ class MongoClientSettingsSpecification extends Specification {
         settings.getReadPreference() == ReadPreference.primary()
         settings.getCommandListeners().isEmpty()
         settings.getApplicationName() == null
-        settings.getLoggerSettings() == LoggerSettings.builder().build();
+        settings.getLoggerSettings() == LoggerSettings.builder().build()
         settings.clusterSettings == ClusterSettings.builder().build()
         settings.connectionPoolSettings == ConnectionPoolSettings.builder().build()
         settings.socketSettings == SocketSettings.builder().build()
@@ -64,6 +64,7 @@ class MongoClientSettingsSpecification extends Specification {
         settings.contextProvider == null
         settings.dnsClient == null
         settings.inetAddressResolver == null
+        settings.getTimeout(TimeUnit.MILLISECONDS) == null
     }
 
     @SuppressWarnings('UnnecessaryObjectReferences')
@@ -151,6 +152,7 @@ class MongoClientSettingsSpecification extends Specification {
                 .contextProvider(contextProvider)
                 .dnsClient(dnsClient)
                 .inetAddressResolver(inetAddressResolver)
+                .timeout(1000, TimeUnit.SECONDS)
                 .build()
 
         then:
@@ -172,6 +174,7 @@ class MongoClientSettingsSpecification extends Specification {
         settings.getContextProvider() == contextProvider
         settings.getDnsClient() == dnsClient
         settings.getInetAddressResolver() == inetAddressResolver
+        settings.getTimeout(TimeUnit.MILLISECONDS) == 1_000_000
     }
 
     def 'should be easy to create new settings from existing'() {
@@ -213,6 +216,7 @@ class MongoClientSettingsSpecification extends Specification {
                 .contextProvider(contextProvider)
                 .dnsClient(dnsClient)
                 .inetAddressResolver(inetAddressResolver)
+                .timeout(0, TimeUnit.SECONDS)
                 .build()
 
         then:
@@ -241,6 +245,30 @@ class MongoClientSettingsSpecification extends Specification {
         thrown(IllegalArgumentException)
     }
 
+    def 'should throw an exception if the timeout is invalid'() {
+        given:
+        def builder = MongoClientSettings.builder()
+
+        when:
+        builder.timeout(500, TimeUnit.NANOSECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.timeout(-1, TimeUnit.SECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        def connectionString = new ConnectionString('mongodb://localhost/?timeoutMS=-1')
+        builder.applyConnectionString(connectionString).build()
+
+        then:
+        thrown(IllegalStateException)
+    }
+
     def 'should add command listeners'() {
         given:
         CommandListener commandListenerOne = Mock(CommandListener)
@@ -308,6 +336,7 @@ class MongoClientSettingsSpecification extends Specification {
                 + '&readConcernLevel=majority'
                 + '&compressors=zlib&zlibCompressionLevel=5'
                 + '&uuidRepresentation=standard'
+                + '&timeoutMS=10000'
                 + '&proxyHost=proxy.com'
                 + '&proxyPort=1080'
                 + '&proxyUsername=username'
@@ -370,6 +399,7 @@ class MongoClientSettingsSpecification extends Specification {
             .retryWrites(true)
             .retryReads(true)
             .uuidRepresentation(UuidRepresentation.STANDARD)
+            .timeout(10000, TimeUnit.MILLISECONDS)
             .build()
 
         then:
@@ -525,7 +555,7 @@ class MongoClientSettingsSpecification extends Specification {
                         'heartbeatConnectTimeoutMS', 'heartbeatSocketTimeoutMS', 'inetAddressResolver', 'loggerSettingsBuilder',
                         'readConcern', 'readPreference', 'retryReads',
                         'retryWrites', 'serverApi', 'serverSettingsBuilder', 'socketSettingsBuilder', 'sslSettingsBuilder',
-                        'transportSettings', 'uuidRepresentation', 'writeConcern']
+                        'timeoutMS', 'transportSettings', 'uuidRepresentation', 'writeConcern']
 
         then:
         actual == expected
@@ -540,7 +570,8 @@ class MongoClientSettingsSpecification extends Specification {
                         'applyToSslSettings', 'autoEncryptionSettings', 'build', 'codecRegistry', 'commandListenerList',
                         'compressorList', 'contextProvider', 'credential', 'dnsClient', 'heartbeatConnectTimeoutMS',
                         'heartbeatSocketTimeoutMS', 'inetAddressResolver', 'readConcern', 'readPreference', 'retryReads', 'retryWrites',
-                        'serverApi', 'transportSettings', 'uuidRepresentation', 'writeConcern']
+                        'serverApi', 'timeout', 'transportSettings', 'uuidRepresentation', 'writeConcern']
+
         then:
         actual == expected
     }
diff --git a/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy
index 37e190432ff..5b3f35f42f1 100644
--- a/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy
@@ -32,6 +32,24 @@ class TransactionOptionsSpecification extends Specification {
         options.getMaxCommitTime(TimeUnit.MILLISECONDS) == null
     }
 
+    def 'should throw an exception if the timeout is invalid'() {
+        given:
+        def builder = TransactionOptions.builder()
+
+
+        when:
+        builder.timeout(500, TimeUnit.NANOSECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.timeout(-1, TimeUnit.SECONDS).build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
     def 'should apply options set in builder'() {
         when:
         def options = TransactionOptions.builder()
@@ -39,6 +57,7 @@ class TransactionOptionsSpecification extends Specification {
                 .writeConcern(WriteConcern.JOURNALED)
                 .readPreference(ReadPreference.secondary())
                 .maxCommitTime(5, TimeUnit.SECONDS)
+                .timeout(null, TimeUnit.MILLISECONDS)
                 .build()
 
         then:
@@ -47,6 +66,7 @@ class TransactionOptionsSpecification extends Specification {
         options.readPreference == ReadPreference.secondary()
         options.getMaxCommitTime(TimeUnit.MILLISECONDS) == 5000
         options.getMaxCommitTime(TimeUnit.SECONDS) == 5
+        options.getTimeout(TimeUnit.MILLISECONDS) == null
     }
 
     def 'should merge'() {
@@ -56,12 +76,14 @@ class TransactionOptionsSpecification extends Specification {
                 .writeConcern(WriteConcern.MAJORITY)
                 .readPreference(ReadPreference.secondary())
                 .maxCommitTime(5, TimeUnit.SECONDS)
+                .timeout(123, TimeUnit.MILLISECONDS)
                 .build()
         def third = TransactionOptions.builder()
                 .readConcern(ReadConcern.LOCAL)
                 .writeConcern(WriteConcern.W2)
                 .readPreference(ReadPreference.nearest())
                 .maxCommitTime(10, TimeUnit.SECONDS)
+                .timeout(123, TimeUnit.MILLISECONDS)
                 .build()
 
         expect:
diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java b/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java
index ac1d17db549..36e25cb534c 100644
--- a/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java
+++ b/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java
@@ -80,6 +80,7 @@ public void testDefaults() {
         assertFalse(serverDescription.isSecondary());
 
         assertEquals(0F, serverDescription.getRoundTripTimeNanos(), 0L);
+        assertEquals(0F, serverDescription.getMinRoundTripTimeNanos(), 0L);
 
         assertEquals(0x1000000, serverDescription.getMaxDocumentSize());
 
@@ -92,6 +93,7 @@ public void testDefaults() {
         assertNull(serverDescription.getSetName());
         assertEquals(0, serverDescription.getMinWireVersion());
         assertEquals(0, serverDescription.getMaxWireVersion());
+        assertFalse(serverDescription.isCryptd());
         assertNull(serverDescription.getElectionId());
         assertNull(serverDescription.getSetVersion());
         assertNull(serverDescription.getTopologyVersion());
@@ -112,6 +114,7 @@ public void testBuilder() {
                                               .setName("test")
                                               .maxDocumentSize(100)
                                               .roundTripTime(50000, java.util.concurrent.TimeUnit.NANOSECONDS)
+                                              .minRoundTripTime(10000, java.util.concurrent.TimeUnit.NANOSECONDS)
                                               .primary("localhost:27017")
                                               .canonicalAddress("localhost:27018")
                                               .hosts(new HashSet<>(asList("localhost:27017",
@@ -131,6 +134,7 @@ public void testBuilder() {
                                               .lastUpdateTimeNanos(40000L)
                                               .logicalSessionTimeoutMinutes(30)
                                               .exception(exception)
+                                              .cryptd(true)
                                               .build();
 
 
@@ -147,6 +151,7 @@ public void testBuilder() {
         assertFalse(serverDescription.isSecondary());
 
         assertEquals(50000, serverDescription.getRoundTripTimeNanos(), 0L);
+        assertEquals(10000, serverDescription.getMinRoundTripTimeNanos(), 0L);
 
         assertEquals(100, serverDescription.getMaxDocumentSize());
 
@@ -168,6 +173,7 @@ public void testBuilder() {
         assertEquals((Integer) 30, serverDescription.getLogicalSessionTimeoutMinutes());
         assertEquals(exception, serverDescription.getException());
         assertEquals(serverDescription, builder(serverDescription).build());
+        assertTrue(serverDescription.isCryptd());
     }
 
     @Test
@@ -235,6 +241,9 @@ public void testObjectOverrides() {
         otherDescription = createBuilder().topologyVersion(new TopologyVersion(new ObjectId(), 44)).build();
         assertNotEquals(builder.build(), otherDescription);
 
+        otherDescription = createBuilder().cryptd(true).build();
+        assertNotEquals(builder.build(), otherDescription);
+
         // test exception state changes
         assertNotEquals(createBuilder().exception(new IOException()).build(),
                 createBuilder().exception(new RuntimeException()).build());
@@ -516,28 +525,4 @@ public void serverWithMaxWireVersionLessThanDriverMinWireVersionShouldBeIncompat
         assertFalse(serverDescription.isIncompatiblyNewerThanDriver());
         assertTrue(serverDescription.isIncompatiblyOlderThanDriver());
     }
-
-    private static final ServerDescription SERVER_DESCRIPTION = builder()
-            .address(new ServerAddress())
-            .type(ServerType.SHARD_ROUTER)
-            .tagSet(new TagSet(singletonList(new Tag("dc", "ny"))))
-            .setName("test")
-            .maxDocumentSize(100)
-            .roundTripTime(50000, TimeUnit.NANOSECONDS)
-            .primary("localhost:27017")
-            .canonicalAddress("localhost:27017")
-            .hosts(new HashSet<>(asList("localhost:27017", "localhost:27018")))
-            .passives(new HashSet<>(singletonList("localhost:27019")))
-            .arbiters(new HashSet<>(singletonList("localhost:27020")))
-            .ok(true)
-            .state(CONNECTED)
-            .minWireVersion(1)
-            .lastWriteDate(new Date())
-            .maxWireVersion(2)
-            .electionId(new ObjectId("abcdabcdabcdabcdabcdabcd"))
-            .setVersion(2)
-            .lastUpdateTimeNanos(1)
-            .lastWriteDate(new Date(42))
-            .logicalSessionTimeoutMinutes(25)
-            .roundTripTime(56, TimeUnit.MILLISECONDS).build();
 }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java b/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java
new file mode 100644
index 00000000000..130d408076e
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal;
+
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
+import com.mongodb.session.ClientSession;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.mockito.Mockito;
+
+import java.util.function.Supplier;
+import java.util.stream.Stream;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_COMMIT;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT;
+import static com.mongodb.ClusterFixture.sleep;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+final class TimeoutContextTest {
+
+    public static long getMaxTimeMS(final TimeoutContext timeoutContext) {
+        long[] result = {0L};
+        timeoutContext.runMaxTimeMS((ms) -> result[0] = ms);
+        return result[0];
+    }
+
+    @Test
+    @DisplayName("test defaults")
+    void testDefaults() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS);
+
+        assertFalse(timeoutContext.hasTimeoutMS());
+        assertEquals(0, getMaxTimeMS(timeoutContext));
+        assertEquals(0, timeoutContext.getMaxAwaitTimeMS());
+        assertEquals(0, timeoutContext.getMaxCommitTimeMS());
+        assertEquals(0, timeoutContext.getReadTimeoutMS());
+    }
+
+    @Test
+    @DisplayName("Uses timeoutMS if set")
+    void testUsesTimeoutMSIfSet() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT);
+
+        assertTrue(timeoutContext.hasTimeoutMS());
+        assertTrue(getMaxTimeMS(timeoutContext) > 0);
+        assertEquals(0, timeoutContext.getMaxAwaitTimeMS());
+    }
+
+    @Test
+    @DisplayName("infinite timeoutMS")
+    void testInfiniteTimeoutMS() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT);
+
+        assertTrue(timeoutContext.hasTimeoutMS());
+        assertEquals(0, getMaxTimeMS(timeoutContext));
+        assertEquals(0, timeoutContext.getMaxAwaitTimeMS());
+    }
+
+    @Test
+    @DisplayName("MaxTimeMS set")
+    void testMaxTimeMSSet() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_TIME);
+
+        assertFalse(timeoutContext.hasTimeoutMS());
+        assertEquals(100, getMaxTimeMS(timeoutContext));
+        assertEquals(0, timeoutContext.getMaxAwaitTimeMS());
+    }
+
+    @Test
+    @DisplayName("MaxAwaitTimeMS set")
+    void testMaxAwaitTimeMSSet() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME);
+
+        assertFalse(timeoutContext.hasTimeoutMS());
+        assertEquals(0, getMaxTimeMS(timeoutContext));
+        assertEquals(101, timeoutContext.getMaxAwaitTimeMS());
+    }
+
+    @Test
+    @DisplayName("MaxTimeMS and MaxAwaitTimeMS set")
+    void testMaxTimeMSAndMaxAwaitTimeMSSet() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME);
+
+        assertFalse(timeoutContext.hasTimeoutMS());
+        assertEquals(101, getMaxTimeMS(timeoutContext));
+        assertEquals(1001, timeoutContext.getMaxAwaitTimeMS());
+    }
+
+    @Test
+    @DisplayName("MaxCommitTimeMS set")
+    void testMaxCommitTimeMSSet() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_COMMIT);
+
+        assertFalse(timeoutContext.hasTimeoutMS());
+        assertEquals(0, getMaxTimeMS(timeoutContext));
+        assertEquals(0, timeoutContext.getMaxAwaitTimeMS());
+        assertEquals(999L, timeoutContext.getMaxCommitTimeMS());
+    }
+
+    @Test
+    @DisplayName("All deprecated options set")
+    void testAllDeprecatedOptionsSet() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS);
+
+        assertFalse(timeoutContext.hasTimeoutMS());
+        assertEquals(101, getMaxTimeMS(timeoutContext));
+        assertEquals(1001, timeoutContext.getMaxAwaitTimeMS());
+        assertEquals(999, timeoutContext.getMaxCommitTimeMS());
+    }
+
+    @Test
+    @DisplayName("Use timeout if available or the alternative")
+    void testUseTimeoutIfAvailableOrTheAlternative() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS);
+        assertEquals(99L, timeoutContext.timeoutOrAlternative(99));
+
+        timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(0L));
+        assertEquals(0L, timeoutContext.timeoutOrAlternative(99));
+
+        timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L));
+        assertTrue(timeoutContext.timeoutOrAlternative(0) <= 999);
+
+        timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L));
+        assertTrue(timeoutContext.timeoutOrAlternative(999999) <= 999);
+
+        timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS);
+        assertEquals(0, timeoutContext.getMaxCommitTimeMS());
+
+        timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L));
+        assertTrue(timeoutContext.getMaxCommitTimeMS() <= 999);
+    }
+
+    @Test
+    @DisplayName("withAdditionalReadTimeout works as expected")
+    void testWithAdditionalReadTimeout() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(0));
+        assertEquals(0L, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS());
+
+        timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(10_000L));
+        assertEquals(10_101L, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS());
+
+        long originalValue = Long.MAX_VALUE - 100;
+        timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(originalValue));
+        assertEquals(Long.MAX_VALUE, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS());
+
+        assertThrows(AssertionError.class, () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(0L)).withAdditionalReadTimeout(1));
+
+        assertThrows(AssertionError.class, () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(10_000L)).withAdditionalReadTimeout(1));
+    }
+
+    @Test
+    @DisplayName("Expired works as expected")
+    void testExpired() {
+        TimeoutContext smallTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(1L));
+        TimeoutContext longTimeout =
+                new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(9999999L));
+        TimeoutContext noTimeout = new TimeoutContext(TIMEOUT_SETTINGS);
+        sleep(100);
+        assertFalse(hasExpired(noTimeout.getTimeout()));
+        assertFalse(hasExpired(longTimeout.getTimeout()));
+        assertTrue(hasExpired(smallTimeout.getTimeout()));
+    }
+
+    private static boolean hasExpired(@Nullable final Timeout timeout) {
+        return Timeout.nullAsInfinite(timeout).call(NANOSECONDS, () -> false, (ns) -> false, () -> true);
+    }
+
+    @Test
+    @DisplayName("throws when calculating timeout if expired")
+    void testThrowsWhenExpired() {
+        TimeoutContext smallTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(1L));
+        TimeoutContext longTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(9999999L));
+        TimeoutContext noTimeout = new TimeoutContext(TIMEOUT_SETTINGS);
+        sleep(100);
+
+        assertThrows(MongoOperationTimeoutException.class, smallTimeout::getReadTimeoutMS);
+        assertThrows(MongoOperationTimeoutException.class, smallTimeout::getWriteTimeoutMS);
+        assertThrows(MongoOperationTimeoutException.class, smallTimeout::getConnectTimeoutMs);
+        assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(smallTimeout));
+        assertThrows(MongoOperationTimeoutException.class, smallTimeout::getMaxCommitTimeMS);
+        assertThrows(MongoOperationTimeoutException.class, () -> smallTimeout.timeoutOrAlternative(1));
+        assertDoesNotThrow(longTimeout::getReadTimeoutMS);
+        assertDoesNotThrow(longTimeout::getWriteTimeoutMS);
+        assertDoesNotThrow(longTimeout::getConnectTimeoutMs);
+        assertDoesNotThrow(() -> getMaxTimeMS(longTimeout));
+        assertDoesNotThrow(longTimeout::getMaxCommitTimeMS);
+        assertDoesNotThrow(() -> longTimeout.timeoutOrAlternative(1));
+        assertDoesNotThrow(noTimeout::getReadTimeoutMS);
+        assertDoesNotThrow(noTimeout::getWriteTimeoutMS);
+        assertDoesNotThrow(noTimeout::getConnectTimeoutMs);
+        assertDoesNotThrow(() -> getMaxTimeMS(noTimeout));
+        assertDoesNotThrow(noTimeout::getMaxCommitTimeMS);
+        assertDoesNotThrow(() -> noTimeout.timeoutOrAlternative(1));
+    }
+
+    @Test
+    @DisplayName("validates minRoundTripTime for maxTimeMS")
+    void testValidatedMinRoundTripTime() {
+        Supplier<TimeoutContext> supplier = () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L));
+
+        assertTrue(getMaxTimeMS(supplier.get()) <= 100);
+        assertTrue(getMaxTimeMS(supplier.get().minRoundTripTimeMS(10)) <= 90);
+        assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(supplier.get().minRoundTripTimeMS(101)));
+        assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(supplier.get().minRoundTripTimeMS(100)));
+    }
+
+    @Test
+    @DisplayName("Test createTimeoutContext handles legacy settings")
+    void testCreateTimeoutContextLegacy() {
+        TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS);
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS);
+
+        ClientSession clientSession = Mockito.mock(ClientSession.class);
+        Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext);
+
+        TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings());
+        assertEquals(timeoutContext, actualTimeoutContext);
+    }
+
+    @Test
+    @DisplayName("Test createTimeoutContext with timeout legacy settings")
+    void testCreateTimeoutContextWithTimeoutLegacy() {
+        TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT);
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS);
+
+        ClientSession clientSession = Mockito.mock(ClientSession.class);
+        Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext);
+
+        TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings());
+        assertEquals(sessionTimeoutContext, actualTimeoutContext);
+    }
+
+    @Test
+    @DisplayName("Test createTimeoutContext with timeout")
+    void testCreateTimeoutContextWithTimeout() {
+        TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT);
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT.withMaxAwaitTimeMS(123));
+
+        ClientSession clientSession = Mockito.mock(ClientSession.class);
+        Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext);
+
+        TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings());
+        assertEquals(sessionTimeoutContext, actualTimeoutContext);
+    }
+
+    @Test
+    @DisplayName("should override maxTimeMS when MaxTimeSupplier is set")
+    void shouldOverrideMaxTimeMS() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L).withMaxTimeMS(1));
+
+        timeoutContext.setMaxTimeOverride(2L);
+
+        assertEquals(2, getMaxTimeMS(timeoutContext));
+    }
+
+    @Test
+    @DisplayName("should reset maxTimeMS to default behaviour")
+    void shouldResetMaxTimeMS() {
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L).withMaxTimeMS(1));
+        timeoutContext.setMaxTimeOverride(1L);
+
+        timeoutContext.resetToDefaultMaxTime();
+
+        assertTrue(getMaxTimeMS(timeoutContext) > 1);
+    }
+
+    static Stream<Arguments> shouldChooseConnectTimeoutWhenItIsLessThanTimeoutMs() {
+        return Stream.of(
+                //connectTimeoutMS, timeoutMS, expected
+                Arguments.of(500L, 1000L, 500L),
+                Arguments.of(0L, null, 0L),
+                Arguments.of(1000L, null, 1000L),
+                Arguments.of(1000L, 0L, 1000L),
+                Arguments.of(0L, 0L, 0L)
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource
+    @DisplayName("should choose connectTimeoutMS when connectTimeoutMS is less than timeoutMS")
+    void shouldChooseConnectTimeoutWhenItIsLessThanTimeoutMs(final Long connectTimeoutMS,
+                                                          final Long timeoutMS,
+                                                          final long expected) {
+        TimeoutContext timeoutContext = new TimeoutContext(
+                new TimeoutSettings(0,
+                connectTimeoutMS,
+                0,
+                timeoutMS,
+                0));
+
+        long calculatedTimeoutMS = timeoutContext.getConnectTimeoutMs();
+        assertEquals(expected, calculatedTimeoutMS);
+    }
+
+
+    static Stream<Arguments> shouldChooseTimeoutMsWhenItIsLessThanConnectTimeoutMS() {
+        return Stream.of(
+                //connectTimeoutMS, timeoutMS, expected
+                Arguments.of(1000L, 1000L, 999),
+                Arguments.of(1000L, 500L, 499L),
+                Arguments.of(0L, 1000L, 999L)
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource
+    @DisplayName("should choose timeoutMS when timeoutMS is less than connectTimeoutMS")
+    void shouldChooseTimeoutMsWhenItIsLessThanConnectTimeoutMS(final Long connectTimeoutMS,
+                                                          final Long timeoutMS,
+                                                          final long expected) {
+        TimeoutContext timeoutContext = new TimeoutContext(
+                new TimeoutSettings(0,
+                        connectTimeoutMS,
+                        0,
+                        timeoutMS,
+                        0));
+
+        long calculatedTimeoutMS = timeoutContext.getConnectTimeoutMs();
+        assertTrue(expected - calculatedTimeoutMS <= 1);
+    }
+
+    private TimeoutContextTest() {
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java b/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java
new file mode 100644
index 00000000000..71f63d32e6d
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal;
+
+import org.junit.jupiter.api.DynamicTest;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestFactory;
+
+import java.util.Collection;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.DynamicTest.dynamicTest;
+
+final class TimeoutSettingsTest {
+
+    @TestFactory
+    Collection<DynamicTest> timeoutSettingsTest() {
+        return asList(
+                dynamicTest("test defaults", () -> {
+                    TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS;
+                    assertAll(
+                            () -> assertEquals(30_000, timeoutSettings.getServerSelectionTimeoutMS()),
+                            () -> assertEquals(10_000, timeoutSettings.getConnectTimeoutMS()),
+                            () -> assertEquals(0, timeoutSettings.getReadTimeoutMS()),
+                            () -> assertNull(timeoutSettings.getTimeoutMS()),
+                            () -> assertEquals(0, timeoutSettings.getMaxTimeMS()),
+                            () -> assertEquals(0, timeoutSettings.getMaxAwaitTimeMS()),
+                            () -> assertNull(timeoutSettings.getWTimeoutMS())
+                    );
+                }),
+                dynamicTest("test overrides", () -> {
+                    TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS
+                            .withTimeoutMS(100L)
+                            .withMaxTimeMS(111)
+                            .withMaxAwaitTimeMS(11)
+                            .withMaxCommitMS(999L)
+                            .withReadTimeoutMS(11_000)
+                            .withWTimeoutMS(222L);
+                    assertAll(
+                            () -> assertEquals(30_000, timeoutSettings.getServerSelectionTimeoutMS()),
+                            () -> assertEquals(10_000, timeoutSettings.getConnectTimeoutMS()),
+                            () -> assertEquals(11_000, timeoutSettings.getReadTimeoutMS()),
+                            () -> assertEquals(100, timeoutSettings.getTimeoutMS()),
+                            () -> assertEquals(111, timeoutSettings.getMaxTimeMS()),
+                            () -> assertEquals(11, timeoutSettings.getMaxAwaitTimeMS()),
+                            () -> assertEquals(999, timeoutSettings.getMaxCommitTimeMS()),
+                            () -> assertEquals(222, timeoutSettings.getWTimeoutMS())
+                    );
+                })
+        );
+    }
+
+    @Test
+    public void testTimeoutSettingsValidation() {
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withTimeoutMS(-1L));
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withMaxAwaitTimeMS(-1));
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withMaxTimeMS(-1));
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withTimeoutMS(10L).withMaxAwaitTimeMS(11));
+    }
+
+    private TimeoutSettingsTest() {
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java
index deb8e4a2e4a..20553fe881a 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTest.java
@@ -15,6 +15,8 @@
  */
 package com.mongodb.internal.async;
 
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import org.junit.jupiter.api.Test;
 
 import java.util.function.BiConsumer;
@@ -26,7 +28,7 @@
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 final class AsyncFunctionsTest extends AsyncFunctionsTestAbstract {
-
+    private static final TimeoutContext TIMEOUT_CONTEXT = new TimeoutContext(new TimeoutSettings(0, 0, 0, 0L, 0));
     @Test
     void test1Method() {
         // the number of expected variations is often: 1 + N methods invoked
@@ -684,6 +686,7 @@ void testRetryLoop() {
                 },
                 (callback) -> {
                     beginAsync().thenRunRetryingWhile(
+                            TIMEOUT_CONTEXT,
                             c -> async(plainTest(0) ? 1 : 2, c),
                             e -> e.getMessage().equals("exception-1")
                     ).finish(callback);
diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java
index bc071c9a4f4..970d87d33ed 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java
@@ -15,11 +15,20 @@
  */
 package com.mongodb.internal.async.function;
 
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.client.syncadapter.SupplyingCallback;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.function.LoopState.AttachmentKey;
 import com.mongodb.internal.operation.retry.AttachmentKeys;
 import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.DisplayName;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.stream.Stream;
 
 import static org.junit.jupiter.api.Assertions.assertAll;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -28,11 +37,43 @@
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Named.named;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
+import static org.mockito.Mockito.mock;
 
 final class RetryStateTest {
-    @Test
-    void unlimitedAttemptsAndAdvance() {
-        RetryState retryState = new RetryState();
+    private static final TimeoutContext TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L,
+            0L, null, 0L));
+
+    private static final TimeoutContext TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L,
+            0L, 1L, 0L));
+
+    private static final TimeoutContext TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L,
+            0L, 0L, 0L));
+    private static final String EXPECTED_TIMEOUT_MESSAGE = "Retry attempt exceeded the timeout limit.";
+
+    static Stream<Arguments> infiniteTimeout() {
+        return Stream.of(
+                arguments(named("Infinite timeoutMs", TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT))
+        );
+    }
+
+    static Stream<Arguments> expiredTimeout() {
+        return Stream.of(
+                arguments(named("Expired timeoutMs", TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT))
+        );
+    }
+
+    static Stream<Arguments> noTimeout() {
+        return Stream.of(
+                arguments(named("No timeoutMs", TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT))
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void unlimitedAttemptsAndAdvance(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         assertAll(
                 () -> assertTrue(retryState.isFirstAttempt()),
                 () -> assertEquals(0, retryState.attempt()),
@@ -57,7 +98,7 @@ void unlimitedAttemptsAndAdvance() {
 
     @Test
     void limitedAttemptsAndAdvance() {
-        RetryState retryState = new RetryState(0);
+        RetryState retryState = RetryState.withNonRetryableState();
         RuntimeException attemptException = new RuntimeException() {
         };
         assertAll(
@@ -75,9 +116,10 @@ void limitedAttemptsAndAdvance() {
         );
     }
 
-    @Test
-    void markAsLastAttemptAdvanceWithRuntimeException() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void markAsLastAttemptAdvanceWithRuntimeException(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         retryState.markAsLastAttempt();
         assertTrue(retryState.isLastAttempt());
         RuntimeException attemptException = new RuntimeException() {
@@ -86,9 +128,10 @@ void markAsLastAttemptAdvanceWithRuntimeException() {
                 () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> fail()));
     }
 
-    @Test
-    void markAsLastAttemptAdvanceWithError() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest(name = "should advance with non-retryable error when marked as last attempt and : ''{0}''")
+    @MethodSource({"infiniteTimeout", "expiredTimeout", "noTimeout"})
+    void markAsLastAttemptAdvanceWithError(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         retryState.markAsLastAttempt();
         assertTrue(retryState.isLastAttempt());
         Error attemptException = new Error() {
@@ -97,32 +140,46 @@ void markAsLastAttemptAdvanceWithError() {
                 () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> fail()));
     }
 
-    @Test
-    void breakAndThrowIfRetryAndFirstAttempt() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndThrowIfRetryAndFirstAttempt(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         retryState.breakAndThrowIfRetryAnd(Assertions::fail);
         assertFalse(retryState.isLastAttempt());
     }
 
-    @Test
-    void breakAndThrowIfRetryAndFalse() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndThrowIfRetryAndFalse(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         advance(retryState);
         retryState.breakAndThrowIfRetryAnd(() -> false);
         assertFalse(retryState.isLastAttempt());
     }
 
-    @Test
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
     void breakAndThrowIfRetryAndTrue() {
-        RetryState retryState = new RetryState();
+        RetryState retryState = new RetryState(TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT);
         advance(retryState);
         assertThrows(RuntimeException.class, () -> retryState.breakAndThrowIfRetryAnd(() -> true));
         assertTrue(retryState.isLastAttempt());
     }
 
     @Test
-    void breakAndThrowIfRetryIfPredicateThrows() {
-        RetryState retryState = new RetryState();
+    void breakAndThrowIfRetryAndTrueWithExpiredTimeout() {
+        TimeoutContext tContextMock = mock(TimeoutContext.class);
+
+        RetryState retryState = new RetryState(tContextMock);
+        advance(retryState);
+        assertThrows(RuntimeException.class, () -> retryState.breakAndThrowIfRetryAnd(() -> true));
+        assertTrue(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndThrowIfRetryIfPredicateThrows(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         advance(retryState);
         RuntimeException e = new RuntimeException() {
         };
@@ -132,18 +189,20 @@ void breakAndThrowIfRetryIfPredicateThrows() {
         assertFalse(retryState.isLastAttempt());
     }
 
-    @Test
-    void breakAndCompleteIfRetryAndFirstAttempt() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndFirstAttempt(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         SupplyingCallback<?> callback = new SupplyingCallback<>();
         assertFalse(retryState.breakAndCompleteIfRetryAnd(Assertions::fail, callback));
         assertFalse(callback.completed());
         assertFalse(retryState.isLastAttempt());
     }
 
-    @Test
-    void breakAndCompleteIfRetryAndFalse() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndFalse(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         advance(retryState);
         SupplyingCallback<?> callback = new SupplyingCallback<>();
         assertFalse(retryState.breakAndCompleteIfRetryAnd(() -> false, callback));
@@ -151,9 +210,10 @@ void breakAndCompleteIfRetryAndFalse() {
         assertFalse(retryState.isLastAttempt());
     }
 
-    @Test
-    void breakAndCompleteIfRetryAndTrue() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndTrue(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         advance(retryState);
         SupplyingCallback<?> callback = new SupplyingCallback<>();
         assertTrue(retryState.breakAndCompleteIfRetryAnd(() -> true, callback));
@@ -161,9 +221,10 @@ void breakAndCompleteIfRetryAndTrue() {
         assertTrue(retryState.isLastAttempt());
     }
 
-    @Test
-    void breakAndCompleteIfRetryAndPredicateThrows() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndPredicateThrows(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         advance(retryState);
         Error e = new Error() {
         };
@@ -175,25 +236,89 @@ void breakAndCompleteIfRetryAndPredicateThrows() {
         assertFalse(retryState.isLastAttempt());
     }
 
-    @Test
-    void advanceOrThrowPredicateFalse() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void advanceOrThrowPredicateFalse(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         RuntimeException attemptException = new RuntimeException() {
         };
         assertThrows(attemptException.getClass(), () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> false));
     }
 
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout"})
+    @DisplayName("should rethrow detected timeout exception even if timeout in retry state is not expired")
+    void advanceReThrowDetectedTimeoutExceptionEvenIfTimeoutInRetryStateIsNotExpired(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+
+        MongoOperationTimeoutException expectedTimeoutException = TimeoutContext.createMongoTimeoutException("Server selection failed");
+        MongoOperationTimeoutException actualTimeoutException =
+                assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException,
+                        (e1, e2) -> expectedTimeoutException,
+                        (rs, e) -> false));
+
+        Assertions.assertEquals(actualTimeoutException, expectedTimeoutException);
+    }
+
+    @Test
+    @DisplayName("should throw timeout exception from retry, when transformer swallows original timeout exception")
+    void advanceThrowTimeoutExceptionWhenTransformerSwallowOriginalTimeoutException() {
+        RetryState retryState = new RetryState(TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT);
+        RuntimeException previousAttemptException = new RuntimeException() {
+        };
+        MongoOperationTimeoutException expectedTimeoutException = TimeoutContext.createMongoTimeoutException("Server selection failed");
+
+        retryState.advanceOrThrow(previousAttemptException,
+                (e1, e2) -> previousAttemptException,
+                (rs, e) -> true);
+
+        MongoOperationTimeoutException actualTimeoutException =
+                assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException,
+                        (e1, e2) -> previousAttemptException,
+                        (rs, e) -> false));
+
+        Assertions.assertNotEquals(actualTimeoutException, expectedTimeoutException);
+        Assertions.assertEquals(EXPECTED_TIMEOUT_MESSAGE, actualTimeoutException.getMessage());
+        Assertions.assertEquals(previousAttemptException, actualTimeoutException.getCause(),
+                "Retry timeout exception should have a cause if transformer returned non-timeout exception.");
+    }
+
+
+    @Test
+    @DisplayName("should throw original timeout exception from retry, when transformer returns original timeout exception")
+    void advanceThrowOriginalTimeoutExceptionWhenTransformerReturnsOriginalTimeoutException() {
+        RetryState retryState = new RetryState(TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT);
+        RuntimeException previousAttemptException = new RuntimeException() {
+        };
+        MongoOperationTimeoutException expectedTimeoutException = TimeoutContext
+                .createMongoTimeoutException("Server selection failed");
+
+        retryState.advanceOrThrow(previousAttemptException,
+                (e1, e2) -> previousAttemptException,
+                (rs, e) -> true);
+
+        MongoOperationTimeoutException actualTimeoutException =
+                assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException,
+                        (e1, e2) -> expectedTimeoutException,
+                        (rs, e) -> false));
+
+        Assertions.assertEquals(actualTimeoutException, expectedTimeoutException);
+        Assertions.assertNull(actualTimeoutException.getCause(),
+                "Original timeout exception should not have a cause if transformer already returned timeout exception.");
+    }
+
     @Test
     void advanceOrThrowPredicateTrueAndLastAttempt() {
-        RetryState retryState = new RetryState(0);
+        RetryState retryState = RetryState.withNonRetryableState();
         Error attemptException = new Error() {
         };
         assertThrows(attemptException.getClass(), () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> true));
     }
 
-    @Test
-    void advanceOrThrowPredicateThrowsAfterFirstAttempt() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void advanceOrThrowPredicateThrowsAfterFirstAttempt(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         RuntimeException predicateException = new RuntimeException() {
         };
         RuntimeException attemptException = new RuntimeException() {
@@ -206,8 +331,26 @@ void advanceOrThrowPredicateThrowsAfterFirstAttempt() {
     }
 
     @Test
-    void advanceOrThrowPredicateThrows() {
-        RetryState retryState = new RetryState();
+    void advanceOrThrowPredicateThrowsTimeoutAfterFirstAttempt() {
+        RetryState retryState = new RetryState(TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT);
+        RuntimeException predicateException = new RuntimeException() {
+        };
+        RuntimeException attemptException = new MongoOperationTimeoutException(EXPECTED_TIMEOUT_MESSAGE);
+        MongoOperationTimeoutException mongoOperationTimeoutException = assertThrows(MongoOperationTimeoutException.class,
+                () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> {
+                    assertTrue(rs.isFirstAttempt());
+                    assertEquals(attemptException, e);
+                    throw predicateException;
+                }));
+
+        assertEquals(EXPECTED_TIMEOUT_MESSAGE, mongoOperationTimeoutException.getMessage());
+        assertNull(mongoOperationTimeoutException.getCause());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void advanceOrThrowPredicateThrows(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         RuntimeException firstAttemptException = new RuntimeException() {
         };
         retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true);
@@ -222,9 +365,10 @@ void advanceOrThrowPredicateThrows() {
         }));
     }
 
-    @Test
-    void advanceOrThrowTransformerThrowsAfterFirstAttempt() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout", "expiredTimeout"})
+    void advanceOrThrowTransformerThrowsAfterFirstAttempt(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         RuntimeException transformerException = new RuntimeException() {
         };
         assertThrows(transformerException.getClass(), () -> retryState.advanceOrThrow(new AssertionError(),
@@ -234,9 +378,10 @@ void advanceOrThrowTransformerThrowsAfterFirstAttempt() {
                 (rs, e) -> fail()));
     }
 
-    @Test
-    void advanceOrThrowTransformerThrows() throws Throwable {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"}) // TODO: consider using a mocked TimeoutContext instead of real fixtures
+    void advanceOrThrowTransformerThrows(final TimeoutContext timeoutContext) throws Throwable {
+        RetryState retryState = new RetryState(timeoutContext);
         Error firstAttemptException = new Error() {
         };
         retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true);
@@ -249,9 +394,10 @@ void advanceOrThrowTransformerThrows() throws Throwable {
                 (rs, e) -> fail()));
     }
 
-    @Test
-    void advanceOrThrowTransformAfterFirstAttempt() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void advanceOrThrowTransformAfterFirstAttempt(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         RuntimeException attemptException = new RuntimeException() {
         };
         RuntimeException transformerResult = new RuntimeException() {
@@ -269,8 +415,32 @@ void advanceOrThrowTransformAfterFirstAttempt() {
     }
 
     @Test
-    void advanceOrThrowTransform() {
-        RetryState retryState = new RetryState();
+    void advanceOrThrowTransformThrowsTimeoutExceptionAfterFirstAttempt() {
+        RetryState retryState = new RetryState(TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT);
+
+        RuntimeException attemptException = new MongoOperationTimeoutException(EXPECTED_TIMEOUT_MESSAGE);
+        RuntimeException transformerResult = new RuntimeException();
+
+        MongoOperationTimeoutException mongoOperationTimeoutException =
+                assertThrows(MongoOperationTimeoutException.class, () -> retryState.advanceOrThrow(attemptException,
+                        (e1, e2) -> {
+                            assertNull(e1);
+                            assertEquals(attemptException, e2);
+                            return transformerResult;
+                        },
+                        (rs, e) -> {
+                            assertEquals(attemptException, e);
+                            return false;
+                        }));
+
+        assertEquals(EXPECTED_TIMEOUT_MESSAGE, mongoOperationTimeoutException.getMessage());
+        assertEquals(transformerResult, mongoOperationTimeoutException.getCause());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void advanceOrThrowTransform(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         RuntimeException firstAttemptException = new RuntimeException() {
         };
         retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true);
@@ -290,9 +460,10 @@ void advanceOrThrowTransform() {
                 }));
     }
 
-    @Test
-    void attachAndAttachment() {
-        RetryState retryState = new RetryState();
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void attachAndAttachment(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
         AttachmentKey<Integer> attachmentKey = AttachmentKeys.maxWireVersion();
         int attachmentValue = 1;
         assertFalse(retryState.attachment(attachmentKey).isPresent());
diff --git a/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy
index 7cbd37bb862..824a724ee81 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy
@@ -18,17 +18,16 @@ package com.mongodb.internal.binding
 
 import com.mongodb.ReadPreference
 import com.mongodb.ServerAddress
-import com.mongodb.ServerApi
-import com.mongodb.ServerApiVersion
 import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerType
-import com.mongodb.internal.IgnorableRequestContext
 import com.mongodb.internal.connection.Cluster
 import com.mongodb.internal.connection.Server
 import com.mongodb.internal.connection.ServerTuple
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
 class SingleServerBindingSpecification extends Specification {
 
     def 'should implement getters'() {
@@ -42,26 +41,22 @@ class SingleServerBindingSpecification extends Specification {
                             .build())
         }
         def address = new ServerAddress()
-        def serverApi = ServerApi.builder().version(ServerApiVersion.V1).build()
+        def operationContext = OPERATION_CONTEXT
 
         when:
-        def binding = new SingleServerBinding(cluster, address, serverApi, IgnorableRequestContext.INSTANCE)
+
+        def binding = new SingleServerBinding(cluster, address, operationContext)
 
         then:
         binding.readPreference == ReadPreference.primary()
-        binding.serverApi == serverApi
-
-        when:
-        def source = binding.getReadConnectionSource()
+        binding.getOperationContext() == operationContext
 
-        then:
-        source.serverApi == serverApi
 
         when:
-        source = binding.getWriteConnectionSource()
+        def source = binding.getReadConnectionSource()
 
         then:
-        source.serverApi == serverApi
+        source.getOperationContext() == operationContext
     }
 
     def 'should increment and decrement reference counts'() {
@@ -77,7 +72,7 @@ class SingleServerBindingSpecification extends Specification {
         def address = new ServerAddress()
 
         when:
-        def binding = new SingleServerBinding(cluster, address, null, IgnorableRequestContext.INSTANCE)
+        def binding = new SingleServerBinding(cluster, address, OPERATION_CONTEXT)
 
         then:
         binding.count == 1
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java
index cc2aa11f74a..5b2cb1ab5f6 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java
@@ -80,9 +80,12 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
 import static com.mongodb.assertions.Assertions.assertFalse;
 import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
 import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.Arrays.asList;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -138,22 +141,22 @@ public void setUp() {
             settingsBuilder.minSize(poolOptions.getNumber("minPoolSize").intValue());
         }
         if (poolOptions.containsKey("maxIdleTimeMS")) {
-            settingsBuilder.maxConnectionIdleTime(poolOptions.getNumber("maxIdleTimeMS").intValue(), TimeUnit.MILLISECONDS);
+            settingsBuilder.maxConnectionIdleTime(poolOptions.getNumber("maxIdleTimeMS").intValue(), MILLISECONDS);
         }
         if (poolOptions.containsKey("waitQueueTimeoutMS")) {
-            settingsBuilder.maxWaitTime(poolOptions.getNumber("waitQueueTimeoutMS").intValue(), TimeUnit.MILLISECONDS);
+            settingsBuilder.maxWaitTime(poolOptions.getNumber("waitQueueTimeoutMS").intValue(), MILLISECONDS);
         }
         if (poolOptions.containsKey("backgroundThreadIntervalMS")) {
             long intervalMillis = poolOptions.getNumber("backgroundThreadIntervalMS").longValue();
             assertFalse(intervalMillis == 0);
             if (intervalMillis < 0) {
-                settingsBuilder.maintenanceInitialDelay(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
+                settingsBuilder.maintenanceInitialDelay(Long.MAX_VALUE, MILLISECONDS);
             } else {
                 /* Using frequency/period instead of an interval as required by the specification is incorrect, for example,
                  * because it opens up a possibility to run the background thread non-stop if runs are as long as or longer than the period.
                  * Nevertheless, I am reusing what we already have in the driver instead of clogging up the implementation. */
                 settingsBuilder.maintenanceFrequency(
-                        poolOptions.getNumber("backgroundThreadIntervalMS").longValue(), TimeUnit.MILLISECONDS);
+                        poolOptions.getNumber("backgroundThreadIntervalMS").longValue(), MILLISECONDS);
             }
         }
         if (poolOptions.containsKey("maxConnecting")) {
@@ -171,7 +174,7 @@ public void setUp() {
             case UNIT: {
                 ServerId serverId = new ServerId(new ClusterId(), new ServerAddress("host1"));
                 pool = new DefaultConnectionPool(serverId, new TestInternalConnectionFactory(), settings, internalSettings,
-                        SameObjectProvider.initialized(mock(SdamServerDescriptionManager.class)));
+                        SameObjectProvider.initialized(mock(SdamServerDescriptionManager.class)), OPERATION_CONTEXT_FACTORY);
                 break;
             }
             case INTEGRATION: {
@@ -190,7 +193,7 @@ public void setUp() {
                                 new TestCommandListener(),
                                 ClusterFixture.getServerApi()
                         ),
-                        settings, internalSettings, sdamProvider));
+                        settings, internalSettings, sdamProvider, OPERATION_CONTEXT_FACTORY));
                 sdamProvider.initialize(new DefaultSdamServerDescriptionManager(mockedCluster(), serverId, mock(ServerListener.class),
                         mock(ServerMonitor.class), pool, connectionMode));
                 setFailPoint();
@@ -244,7 +247,7 @@ public void shouldPassAllOutcomes() throws Exception {
                     assumeNotNull(eventClass);
                     long timeoutMillis = operation.getNumber("timeout", new BsonInt64(TimeUnit.SECONDS.toMillis(5)))
                             .longValue();
-                    listener.waitForEvent(eventClass, operation.getNumber("count").intValue(), timeoutMillis, TimeUnit.MILLISECONDS);
+                    listener.waitForEvent(eventClass, operation.getNumber("count").intValue(), timeoutMillis, MILLISECONDS);
                 } else if (name.equals("clear")) {
                     pool.invalidate(null);
                 } else if (name.equals("ready")) {
@@ -383,6 +386,10 @@ private void assertReasonMatch(final BsonDocument expectedEvent, final Connectio
         }
     }
 
+    protected OperationContext createOperationContext() {
+        return ClusterFixture.createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(settings.getMaxWaitTime(MILLISECONDS)));
+    }
+
     private void assertReasonMatch(final BsonDocument expectedEvent, final ConnectionCheckOutFailedEvent connectionCheckOutFailedEvent) {
         if (!expectedEvent.containsKey("reason")) {
             return;
@@ -528,7 +535,8 @@ private <Event> Event getNextEvent(final Iterator<Object> eventsIterator, final
     }
 
     private static void executeAdminCommand(final BsonDocument command) {
-        new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(ClusterFixture.getBinding());
+        new CommandReadOperation<>("admin", command, new BsonDocumentCodec())
+                .execute(ClusterFixture.getBinding());
     }
 
     private void setFailPoint() {
@@ -624,13 +632,6 @@ public InternalConnection get(final OperationContext operationContext) {
             return result;
         }
 
-        @Override
-        public InternalConnection get(final OperationContext operationContext, final long timeout, final TimeUnit timeUnit) {
-            InternalConnection result = pool.get(new OperationContext(), timeout, timeUnit);
-            updateConnectionIdLocalValueAdjustment(result);
-            return result;
-        }
-
         @Override
         public void getAsync(final OperationContext operationContext, final SingleResultCallback<InternalConnection> callback) {
             pool.getAsync(operationContext, (result, problem) -> {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java
index c0924c3d74d..6fe76d0198a 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java
@@ -27,7 +27,9 @@
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.connection.ServerType;
 import com.mongodb.event.ClusterListener;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.connection.SdamServerDescriptionManager.SdamIssue;
+import com.mongodb.internal.time.Timeout;
 import org.bson.BsonArray;
 import org.bson.BsonDocument;
 import org.bson.BsonInt32;
@@ -42,6 +44,8 @@
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
 import static com.mongodb.connection.ServerConnectionState.CONNECTING;
 import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription;
 import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException;
@@ -73,26 +77,29 @@ protected void applyResponse(final BsonArray response) {
         if (helloResult.isEmpty()) {
             serverDescription = ServerDescription.builder().type(ServerType.UNKNOWN).state(CONNECTING).address(serverAddress).build();
         } else {
-            serverDescription = createServerDescription(serverAddress, helloResult, 5000000);
+            serverDescription = createServerDescription(serverAddress, helloResult, 5000000, 0);
         }
         factory.sendNotification(serverAddress, serverDescription);
     }
 
     protected void applyApplicationError(final BsonDocument applicationError) {
+        Timeout serverSelectionTimeout = OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout();
         ServerAddress serverAddress = new ServerAddress(applicationError.getString("address").getValue());
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS);
         int errorGeneration = applicationError.getNumber("generation",
-                new BsonInt32(((DefaultServer) getCluster().getServersSnapshot().getServer(serverAddress))
+                new BsonInt32(((DefaultServer) getCluster().getServersSnapshot(serverSelectionTimeout, timeoutContext).getServer(serverAddress))
                         .getConnectionPool().getGeneration())).intValue();
         int maxWireVersion = applicationError.getNumber("maxWireVersion").intValue();
         String when = applicationError.getString("when").getValue();
         String type = applicationError.getString("type").getValue();
 
-        DefaultServer server = (DefaultServer) cluster.getServersSnapshot().getServer(serverAddress);
+        DefaultServer server = (DefaultServer) cluster.getServersSnapshot(serverSelectionTimeout, timeoutContext).getServer(serverAddress);
         RuntimeException exception;
 
         switch (type) {
             case "command":
-                exception = getCommandFailureException(applicationError.getDocument("response"), serverAddress);
+                exception = getCommandFailureException(applicationError.getDocument("response"), serverAddress,
+                        OPERATION_CONTEXT.getTimeoutContext());
                 break;
             case "network":
                 exception = new MongoSocketReadException("Read error", serverAddress, new IOException());
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy
index 0f51bab44a8..50f78639168 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy
@@ -31,14 +31,19 @@ import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerType
 import com.mongodb.event.ServerDescriptionChangedEvent
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.selector.ReadPreferenceServerSelector
 import com.mongodb.internal.selector.ServerAddressSelector
 import com.mongodb.internal.selector.WritableServerSelector
+import com.mongodb.internal.time.Timeout
 import spock.lang.Specification
 import util.spock.annotations.Slow
 
 import java.util.concurrent.CountDownLatch
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
+import static com.mongodb.ClusterFixture.createOperationContext
 import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
 import static com.mongodb.connection.ClusterSettings.builder
 import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY
@@ -61,7 +66,6 @@ class BaseClusterSpecification extends Specification {
         given:
         def clusterSettings = builder().mode(MULTIPLE)
                 .hosts([firstServer, secondServer, thirdServer])
-                .serverSelectionTimeout(1, MILLISECONDS)
                 .serverSelector(new ServerAddressSelector(firstServer))
                 .build()
         def cluster = new BaseCluster(new ClusterId(), clusterSettings, factory) {
@@ -70,7 +74,7 @@ class BaseClusterSpecification extends Specification {
             }
 
             @Override
-            Cluster.ServersSnapshot getServersSnapshot() {
+            Cluster.ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) {
                 Cluster.ServersSnapshot result = {
                     serverAddress -> throw new UnsupportedOperationException()
                 }
@@ -87,7 +91,17 @@ class BaseClusterSpecification extends Specification {
                 factory.getSettings())
 
         when: 'a server is selected before initialization'
-        cluster.selectServer({ def clusterDescription -> [] }, new OperationContext())
+        cluster.selectServer({ def clusterDescription -> [] },
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(1)))
+
+        then: 'a MongoTimeoutException is thrown'
+        thrown(MongoTimeoutException)
+
+        when: 'a server is selected before initialization and timeoutMS is set'
+        cluster.selectServer({ def clusterDescription -> [] },
+                createOperationContext(TIMEOUT_SETTINGS
+                        .withServerSelectionTimeoutMS(1)
+                        .withTimeout(1, MILLISECONDS)))
 
         then: 'a MongoTimeoutException is thrown'
         thrown(MongoTimeoutException)
@@ -120,7 +134,7 @@ class BaseClusterSpecification extends Specification {
         factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers)
 
         expect:
-        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), new OperationContext())
+        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), OPERATION_CONTEXT)
                 .serverDescription.address == firstServer
     }
 
@@ -128,7 +142,6 @@ class BaseClusterSpecification extends Specification {
         given:
         def cluster = new MultiServerCluster(new ClusterId(),
                 builder().mode(MULTIPLE)
-                        .serverSelectionTimeout(1, SECONDS)
                         .hosts([firstServer, secondServer, thirdServer])
                         .build(),
                 factory)
@@ -137,7 +150,9 @@ class BaseClusterSpecification extends Specification {
         factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers)
 
         expect:
-        cluster.selectServer(new ServerAddressSelector(firstServer), new OperationContext()).serverDescription.address == firstServer
+        cluster.selectServer(new ServerAddressSelector(firstServer),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(1_000)))
+                .serverDescription.address == firstServer
     }
 
     def 'should apply local threshold when custom server selector is present'() {
@@ -155,7 +170,7 @@ class BaseClusterSpecification extends Specification {
         factory.sendNotification(thirdServer, 1, REPLICA_SET_PRIMARY, allServers)
 
         expect:
-        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.nearest()), new OperationContext())
+        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.nearest()), OPERATION_CONTEXT)
                 .serverDescription.address == firstServer
     }
 
@@ -173,7 +188,7 @@ class BaseClusterSpecification extends Specification {
         factory.sendNotification(thirdServer, 1, REPLICA_SET_PRIMARY, allServers)
 
         expect: // firstServer is the only secondary within the latency threshold
-        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), new OperationContext())
+        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), OPERATION_CONTEXT)
                 .serverDescription.address == firstServer
     }
 
@@ -182,7 +197,6 @@ class BaseClusterSpecification extends Specification {
         def cluster = new MultiServerCluster(new ClusterId(),
                 builder().mode(MULTIPLE)
                         .hosts([firstServer, secondServer])
-                        .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS)
                         .build(),
                 factory)
 
@@ -193,10 +207,12 @@ class BaseClusterSpecification extends Specification {
                                                                .exception(new MongoInternalException('oops'))
                                                                .build())
 
-        cluster.selectServer(new WritableServerSelector(), new OperationContext())
+        cluster.selectServer(new WritableServerSelector(),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS)))
 
         then:
         def e = thrown(MongoTimeoutException)
+
         e.getMessage().startsWith("Timed out while waiting for a server " +
                 'that matches WritableServerSelector. Client view of cluster state is {type=UNKNOWN')
         e.getMessage().contains('{address=localhost:27017, type=UNKNOWN, state=CONNECTING, ' +
@@ -212,7 +228,6 @@ class BaseClusterSpecification extends Specification {
         def cluster = new MultiServerCluster(new ClusterId(),
                 builder().mode(MULTIPLE)
                         .hosts([firstServer, secondServer, thirdServer])
-                        .serverSelectionTimeout(serverSelectionTimeoutMS, SECONDS)
                         .build(),
                 factory)
         factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, allServers)
@@ -220,7 +235,8 @@ class BaseClusterSpecification extends Specification {
         factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers)
 
         expect:
-        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), new OperationContext())
+        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS)))
                 .serverDescription.address == thirdServer
 
         cleanup:
@@ -236,7 +252,6 @@ class BaseClusterSpecification extends Specification {
         def cluster = new MultiServerCluster(new ClusterId(),
                 builder().mode(MULTIPLE)
                         .hosts([firstServer, secondServer, thirdServer])
-                        .serverSelectionTimeout(-1, SECONDS)
                         .build(),
                 factory)
 
@@ -244,7 +259,8 @@ class BaseClusterSpecification extends Specification {
         def latch = new CountDownLatch(1)
         def thread = new Thread({
             try {
-                cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), new OperationContext())
+                cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()),
+                        createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(-1_000)))
             } catch (MongoInterruptedException e) {
                 latch.countDown()
             }
@@ -265,14 +281,13 @@ class BaseClusterSpecification extends Specification {
         given:
         def cluster = new MultiServerCluster(new ClusterId(),
                 builder().mode(MULTIPLE)
-                        .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS)
                         .hosts([firstServer, secondServer, thirdServer])
                         .build(),
                 factory)
         factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, allServers)
 
         when:
-        def serverDescription = selectServerAsync(cluster, firstServer).getDescription()
+        def serverDescription = selectServerAsync(cluster, firstServer, serverSelectionTimeoutMS).getDescription()
 
         then:
         serverDescription.address == firstServer
@@ -288,14 +303,13 @@ class BaseClusterSpecification extends Specification {
         given:
         def cluster = new MultiServerCluster(new ClusterId(),
                 builder().mode(MULTIPLE)
-                        .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS)
                         .hosts([firstServer, secondServer, thirdServer])
                         .build(),
                 factory)
 
         when:
-        def secondServerLatch = selectServerAsync(cluster, secondServer)
-        def thirdServerLatch = selectServerAsync(cluster, thirdServer)
+        def secondServerLatch = selectServerAsync(cluster, secondServer, serverSelectionTimeoutMS)
+        def thirdServerLatch = selectServerAsync(cluster, thirdServer, serverSelectionTimeoutMS)
         factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, allServers)
         factory.sendNotification(thirdServer, REPLICA_SET_SECONDARY, allServers)
 
@@ -335,12 +349,11 @@ class BaseClusterSpecification extends Specification {
         def cluster = new MultiServerCluster(new ClusterId(),
                 builder().mode(MULTIPLE)
                         .hosts([firstServer, secondServer, thirdServer])
-                        .serverSelectionTimeout(serverSelectionTimeoutMS, MILLISECONDS)
                         .build(),
                 factory)
 
         when:
-        selectServerAsyncAndGet(cluster, firstServer)
+        selectServerAsyncAndGet(cluster, firstServer, serverSelectionTimeoutMS)
 
         then:
         thrown(MongoTimeoutException)
@@ -354,12 +367,21 @@ class BaseClusterSpecification extends Specification {
     }
 
     def selectServerAsyncAndGet(BaseCluster cluster, ServerAddress serverAddress) {
-        selectServerAsync(cluster, serverAddress).get()
+        selectServerAsyncAndGet(cluster, serverAddress, 1_000)
+    }
+
+    def selectServerAsyncAndGet(BaseCluster cluster, ServerAddress serverAddress, long serverSelectionTimeoutMS) {
+        selectServerAsync(cluster, serverAddress, serverSelectionTimeoutMS).get()
     }
 
     def selectServerAsync(BaseCluster cluster, ServerAddress serverAddress) {
+        selectServerAsync(cluster, serverAddress, 1_000)
+    }
+
+    def selectServerAsync(BaseCluster cluster, ServerAddress serverAddress, long serverSelectionTimeoutMS) {
         def serverLatch = new ServerLatch()
-        cluster.selectServerAsync(new ServerAddressSelector(serverAddress), new OperationContext()) {
+        cluster.selectServerAsync(new ServerAddressSelector(serverAddress),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS))) {
             ServerTuple result, MongoException e ->
                 serverLatch.server = result != null ? result.getServer() : null
                 serverLatch.serverDescription = result != null ? result.serverDescription : null
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java
index 641f814a6dd..1cba6d91c3c 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java
@@ -15,6 +15,7 @@
  */
 package com.mongodb.internal.connection;
 
+import com.mongodb.ClusterFixture;
 import com.mongodb.ServerAddress;
 import com.mongodb.connection.ClusterConnectionMode;
 import com.mongodb.connection.ClusterDescription;
@@ -47,7 +48,7 @@ void selectServerToleratesWhenThereIsNoServerForTheSelectedAddress() {
                 new ServerAddressSelector(serverAddressA),
                 clusterDescriptionAB,
                 serversSnapshotB,
-                new OperationContext().getServerDeprioritization(),
+                ClusterFixture.OPERATION_CONTEXT.getServerDeprioritization(),
                 ClusterSettings.builder().build()));
     }
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java
new file mode 100644
index 00000000000..f7873379c3b
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoCommandException;
+import com.mongodb.ServerAddress;
+import com.mongodb.ServerApi;
+import com.mongodb.ServerApiVersion;
+import com.mongodb.connection.ClusterId;
+import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.connection.ServerId;
+import com.mongodb.internal.IgnorableRequestContext;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
+import org.bson.BsonDocument;
+import org.bson.codecs.Decoder;
+import org.junit.jupiter.api.Test;
+
+import static com.mongodb.connection.ClusterConnectionMode.SINGLE;
+import static com.mongodb.internal.connection.CommandHelper.executeCommand;
+import static com.mongodb.internal.connection.CommandHelper.executeCommandAsync;
+import static com.mongodb.internal.connection.CommandHelper.executeCommandWithoutCheckingForFailure;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class CommandHelperTest {
+
+    static final BsonDocument COMMAND = BsonDocument.parse("{ping: 1}");
+    static final BsonDocument OK = BsonDocument.parse("{ok: 1}");
+    static final BsonDocument NOT_OK = BsonDocument.parse("{ok: 0, errmsg: 'error'}");
+
+    static final ConnectionDescription CONNECTION_DESCRIPTION = new ConnectionDescription(
+            new ServerId(new ClusterId("cluster"), new ServerAddress()));
+
+    @Test
+    @SuppressWarnings("unchecked")
+    void testExecuteCommand() {
+        InternalConnection internalConnection = mock(InternalConnection.class);
+        ServerDescription serverDescription = mock(ServerDescription.class);
+        OperationContext operationContext = createOperationContext();
+
+
+        when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION);
+        when(internalConnection.sendAndReceive(any(), any(), any())).thenReturn(OK);
+        when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription);
+
+        assertEquals(OK,
+                executeCommand("admin", COMMAND, SINGLE, operationContext.getServerApi(), internalConnection, operationContext));
+
+        verify(internalConnection).sendAndReceive(any(CommandMessage.class), any(Decoder.class), eq(operationContext));
+    }
+
+    @Test
+    @SuppressWarnings("unchecked")
+    void testExecuteCommandWithoutCheckingForFailure() {
+        InternalConnection internalConnection = mock(InternalConnection.class);
+        ServerDescription serverDescription = mock(ServerDescription.class);
+        OperationContext operationContext = createOperationContext();
+
+        when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION);
+        when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription);
+        when(internalConnection.sendAndReceive(any(), any(), any()))
+                .thenThrow(new MongoCommandException(NOT_OK, new ServerAddress()));
+
+        assertEquals(new BsonDocument(),
+                executeCommandWithoutCheckingForFailure("admin", COMMAND, SINGLE, operationContext.getServerApi(),
+                        internalConnection, operationContext));
+
+        verify(internalConnection).sendAndReceive(any(CommandMessage.class), any(Decoder.class), eq(operationContext));
+    }
+
+
+    @Test
+    @SuppressWarnings("unchecked")
+    void testExecuteCommandAsyncUsesTheOperationContext() {
+        InternalConnection internalConnection = mock(InternalConnection.class);
+        OperationContext operationContext = createOperationContext();
+        ServerDescription serverDescription = mock(ServerDescription.class);
+
+        when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription);
+        when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION);
+        when(internalConnection.sendAndReceive(any(), any(), any())).thenReturn(OK);
+
+        executeCommandAsync("admin", COMMAND, SINGLE, operationContext.getServerApi(), internalConnection, operationContext,
+                (r, t) -> {});
+
+        verify(internalConnection).sendAndReceiveAsync(any(CommandMessage.class), any(Decoder.class), eq(operationContext), any());
+    }
+
+    @Test
+    void testIsCommandOk() {
+        assertTrue(CommandHelper.isCommandOk(OK));
+        assertTrue(CommandHelper.isCommandOk(BsonDocument.parse("{ok: true}")));
+        assertFalse(CommandHelper.isCommandOk(NOT_OK));
+        assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: false}")));
+        assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: 11}")));
+        assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: 'nope'}")));
+        assertFalse(CommandHelper.isCommandOk(new BsonDocument()));
+    }
+
+
+    OperationContext createOperationContext() {
+        return new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                new TimeoutContext(TimeoutSettings.DEFAULT), ServerApi.builder().version(ServerApiVersion.V1).build());
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy
index edc6e92c30e..427fe23c613 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy
@@ -22,6 +22,8 @@ import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.connection.ClusterConnectionMode
 import com.mongodb.connection.ServerType
+import com.mongodb.internal.IgnorableRequestContext
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.bulk.InsertRequest
 import com.mongodb.internal.bulk.WriteRequestWithIndex
 import com.mongodb.internal.session.SessionContext
@@ -63,7 +65,7 @@ class CommandMessageSpecification extends Specification {
         def output = new BasicOutputBuffer()
 
         when:
-        message.encode(output, sessionContext)
+        message.encode(output, operationContext)
 
         then:
         def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
@@ -77,11 +79,11 @@ class CommandMessageSpecification extends Specification {
         def expectedCommandDocument = command.clone()
                 .append('$db', new BsonString(namespace.databaseName))
 
-        if (sessionContext.clusterTime != null) {
-            expectedCommandDocument.append('$clusterTime', sessionContext.clusterTime)
+        if (operationContext.getSessionContext().clusterTime != null) {
+            expectedCommandDocument.append('$clusterTime', operationContext.getSessionContext().clusterTime)
         }
-        if (sessionContext.hasSession() && responseExpected) {
-            expectedCommandDocument.append('lsid', sessionContext.sessionId)
+        if (operationContext.getSessionContext().hasSession() && responseExpected) {
+            expectedCommandDocument.append('lsid', operationContext.getSessionContext().sessionId)
         }
 
         if (readPreference != ReadPreference.primary()) {
@@ -92,35 +94,44 @@ class CommandMessageSpecification extends Specification {
         getCommandDocument(byteBuf, replyHeader) == expectedCommandDocument
 
         where:
-        [readPreference, serverType, clusterConnectionMode, sessionContext, responseExpected] << [
+        [readPreference, serverType, clusterConnectionMode, operationContext, responseExpected, isCryptd] << [
                 [ReadPreference.primary(), ReadPreference.secondary()],
                 [ServerType.REPLICA_SET_PRIMARY, ServerType.SHARD_ROUTER],
                 [ClusterConnectionMode.SINGLE, ClusterConnectionMode.MULTIPLE],
                 [
-                        Stub(SessionContext) {
-                            hasSession() >> false
-                            getClusterTime() >> null
-                            getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[]))
-                            getReadConcern() >> ReadConcern.DEFAULT
-                        },
-                        Stub(SessionContext) {
-                            hasSession() >> false
-                            getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1))
-                            getReadConcern() >> ReadConcern.DEFAULT
-                        },
-                        Stub(SessionContext) {
-                            hasSession() >> true
-                            getClusterTime() >> null
-                            getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[]))
-                            getReadConcern() >> ReadConcern.DEFAULT
-                        },
-                        Stub(SessionContext) {
-                            hasSession() >> true
-                            getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1))
-                            getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[]))
-                            getReadConcern() >> ReadConcern.DEFAULT
-                            }
+                        new OperationContext(
+                                IgnorableRequestContext.INSTANCE,
+                                Stub(SessionContext) {
+                                    hasSession() >> false
+                                    getClusterTime() >> null
+                                    getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[]))
+                                    getReadConcern() >> ReadConcern.DEFAULT
+                                }, Stub(TimeoutContext), null),
+                        new OperationContext(
+                                IgnorableRequestContext.INSTANCE,
+                                Stub(SessionContext) {
+                                    hasSession() >> false
+                                    getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1))
+                                    getReadConcern() >> ReadConcern.DEFAULT
+                                }, Stub(TimeoutContext), null),
+                        new OperationContext(
+                                IgnorableRequestContext.INSTANCE,
+                                Stub(SessionContext) {
+                                    hasSession() >> true
+                                    getClusterTime() >> null
+                                    getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[]))
+                                    getReadConcern() >> ReadConcern.DEFAULT
+                                }, Stub(TimeoutContext), null),
+                        new OperationContext(
+                                IgnorableRequestContext.INSTANCE,
+                                Stub(SessionContext) {
+                                    hasSession() >> true
+                                    getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1))
+                                    getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[]))
+                                    getReadConcern() >> ReadConcern.DEFAULT
+                                }, Stub(TimeoutContext), null)
                 ],
+                [true, false],
                 [true, false]
         ].combinations()
     }
@@ -141,7 +152,8 @@ class CommandMessageSpecification extends Specification {
                 MessageSettings.builder().maxWireVersion(maxWireVersion).build(), true, payload, new NoOpFieldNameValidator(),
                 ClusterConnectionMode.MULTIPLE, null)
         def output = new ByteBufferBsonOutput(new SimpleBufferProvider())
-        message.encode(output, NoOpSessionContext.INSTANCE)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                Stub(TimeoutContext), null))
 
         when:
         def commandDocument = message.getCommandDocument(output)
@@ -190,7 +202,8 @@ class CommandMessageSpecification extends Specification {
         }
 
         when:
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext,
+                Stub(TimeoutContext), null))
         def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
         def messageHeader = new MessageHeader(byteBuf, maxMessageSize)
 
@@ -208,7 +221,7 @@ class CommandMessageSpecification extends Specification {
         message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings,
                 false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null)
         output.truncateToPosition(0)
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null))
         byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
         messageHeader = new MessageHeader(byteBuf, maxMessageSize)
 
@@ -226,7 +239,7 @@ class CommandMessageSpecification extends Specification {
         message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings,
                 false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null)
         output.truncateToPosition(0)
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null))
         byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
         messageHeader = new MessageHeader(byteBuf, maxMessageSize)
 
@@ -244,7 +257,10 @@ class CommandMessageSpecification extends Specification {
         message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings,
                 false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null)
         output.truncateToPosition(0)
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE,
+                sessionContext,
+                Stub(TimeoutContext),
+                null))
         byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
         messageHeader = new MessageHeader(byteBuf, maxMessageSize)
 
@@ -273,7 +289,9 @@ class CommandMessageSpecification extends Specification {
         }
 
         when:
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext,
+                Stub(TimeoutContext),
+                null))
         def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
         def messageHeader = new MessageHeader(byteBuf, 2048)
 
@@ -291,7 +309,8 @@ class CommandMessageSpecification extends Specification {
         message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings,
                 false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null)
         output.truncateToPosition(0)
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext,
+                Stub(TimeoutContext), null))
         byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
         messageHeader = new MessageHeader(byteBuf, 1024)
 
@@ -318,7 +337,8 @@ class CommandMessageSpecification extends Specification {
         }
 
         when:
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext,
+                Stub(TimeoutContext), null))
 
         then:
         thrown(BsonMaximumSizeExceededException)
@@ -338,7 +358,8 @@ class CommandMessageSpecification extends Specification {
         }
 
         when:
-        message.encode(output, sessionContext)
+        message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext,
+                Stub(TimeoutContext), null))
 
         then:
         thrown(MongoClientException)
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java
new file mode 100644
index 00000000000..f08086be5e8
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.connection.ClusterConnectionMode;
+import com.mongodb.connection.ServerType;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.session.SessionContext;
+import com.mongodb.internal.validator.NoOpFieldNameValidator;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.BsonTimestamp;
+import org.bson.FieldNameValidator;
+import org.bson.io.BasicOutputBuffer;
+import org.junit.jupiter.api.Test;
+
+import static com.mongodb.internal.mockito.MongoMockito.mock;
+import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_ZERO_WIRE_VERSION;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.verifyNoInteractions;
+import static org.mockito.Mockito.when;
+
+class CommandMessageTest {
+
+    private static final MongoNamespace NAMESPACE = new MongoNamespace("db.test");
+    private static final BsonDocument COMMAND = new BsonDocument("find", new BsonString(NAMESPACE.getCollectionName()));
+    private static final FieldNameValidator FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator();
+
+    @Test
+    void encodeShouldThrowTimeoutExceptionWhenTimeoutContextIsCalled() {
+        //given
+        CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, FIELD_NAME_VALIDATOR, ReadPreference.primary(),
+                MessageSettings.builder()
+                        .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION)
+                        .serverType(ServerType.REPLICA_SET_SECONDARY)
+                        .sessionSupported(true)
+                        .build(),
+                true, null, null, ClusterConnectionMode.MULTIPLE, null);
+
+        BasicOutputBuffer bsonOutput = new BasicOutputBuffer();
+        SessionContext sessionContext = mock(SessionContext.class);
+        TimeoutContext timeoutContext = mock(TimeoutContext.class, mock -> {
+            doThrow(new MongoOperationTimeoutException("test")).when(mock).runMaxTimeMS(any());
+        });
+        OperationContext operationContext = mock(OperationContext.class, mock -> {
+            when(mock.getSessionContext()).thenReturn(sessionContext);
+            when(mock.getTimeoutContext()).thenReturn(timeoutContext);
+        });
+
+        //when & then
+        assertThrows(MongoOperationTimeoutException.class, () ->
+                commandMessage.encode(bsonOutput, operationContext));
+    }
+
+    @Test
+    void encodeShouldNotAddExtraElementsFromTimeoutContextWhenConnectedToMongoCrypt() {
+        //given
+        CommandMessage commandMessage = new CommandMessage(NAMESPACE, COMMAND, FIELD_NAME_VALIDATOR, ReadPreference.primary(),
+                MessageSettings.builder()
+                        .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION)
+                        .serverType(ServerType.REPLICA_SET_SECONDARY)
+                        .sessionSupported(true)
+                        .cryptd(true)
+                        .build(),
+                true, null, null, ClusterConnectionMode.MULTIPLE, null);
+
+        BasicOutputBuffer bsonOutput = new BasicOutputBuffer();
+        SessionContext sessionContext = mock(SessionContext.class, mock -> {
+            when(mock.getClusterTime()).thenReturn(new BsonDocument("clusterTime", new BsonTimestamp(42, 1)));
+            when(mock.hasSession()).thenReturn(false);
+            when(mock.getReadConcern()).thenReturn(ReadConcern.DEFAULT);
+            when(mock.notifyMessageSent()).thenReturn(true);
+            when(mock.hasActiveTransaction()).thenReturn(false);
+            when(mock.isSnapshot()).thenReturn(false);
+        });
+        TimeoutContext timeoutContext = mock(TimeoutContext.class);
+        OperationContext operationContext = mock(OperationContext.class, mock -> {
+            when(mock.getSessionContext()).thenReturn(sessionContext);
+            when(mock.getTimeoutContext()).thenReturn(timeoutContext);
+        });
+
+        //when
+        commandMessage.encode(bsonOutput, operationContext);
+
+        //then
+        verifyNoInteractions(timeoutContext);
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java
index b8574081f5c..1006b10665b 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java
@@ -45,7 +45,7 @@ protected Callable<Exception> createCallable(final BsonDocument operation) {
             FutureResultCallback<InternalConnection> callback = new FutureResultCallback<>();
             return () -> {
                 try {
-                    getPool().getAsync(new OperationContext(), (connection, t) -> {
+                    getPool().getAsync(createOperationContext(), (connection, t) -> {
                         if (t != null) {
                             callback.onResult(null, t);
                         } else {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java
index b5b449c755d..425a5da0fcb 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java
@@ -43,7 +43,7 @@ protected Callable<Exception> createCallable(final BsonDocument operation) {
         if (name.equals("checkOut")) {
             return () -> {
                 try {
-                    InternalConnection connection = getPool().get(new OperationContext());
+                    InternalConnection connection = getPool().get(createOperationContext());
                     if (operation.containsKey("label")) {
                         getConnectionMap().put(operation.getString("label").getValue(), connection);
                     }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy
index ecbdb2c55ab..fe251d34311 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy
@@ -16,6 +16,7 @@
 
 package com.mongodb.internal.connection
 
+import com.mongodb.ClusterFixture
 import com.mongodb.MongoConnectionPoolClearedException
 import com.mongodb.MongoServerUnavailableException
 import com.mongodb.MongoTimeoutException
@@ -26,7 +27,6 @@ import com.mongodb.connection.ConnectionId
 import com.mongodb.connection.ServerId
 import com.mongodb.event.ConnectionCheckOutFailedEvent
 import com.mongodb.event.ConnectionPoolListener
-import com.mongodb.internal.async.SingleResultCallback
 import com.mongodb.internal.inject.EmptyProvider
 import com.mongodb.internal.inject.SameObjectProvider
 import com.mongodb.internal.logging.LogMessage
@@ -41,6 +41,10 @@ import java.util.concurrent.CountDownLatch
 import java.util.regex.Matcher
 import java.util.regex.Pattern
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
+import static com.mongodb.ClusterFixture.createOperationContext
 import static com.mongodb.connection.ConnectionPoolSettings.builder
 import static java.util.concurrent.TimeUnit.MILLISECONDS
 import static java.util.concurrent.TimeUnit.MINUTES
@@ -70,22 +74,22 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should get non null connection'() throws InterruptedException {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         expect:
-        pool.get(new OperationContext()) != null
+        pool.get(OPERATION_CONTEXT) != null
     }
 
     def 'should reuse released connection'() throws InterruptedException {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
-        pool.get(new OperationContext()).close()
-        pool.get(new OperationContext())
+        pool.get(OPERATION_CONTEXT).close()
+        pool.get(OPERATION_CONTEXT)
 
         then:
         1 * connectionFactory.create(SERVER_ID, _)
@@ -94,11 +98,11 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should release a connection back into the pool on close, not close the underlying connection'() throws InterruptedException {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
-        pool.get(new OperationContext()).close()
+        pool.get(OPERATION_CONTEXT).close()
 
         then:
         !connectionFactory.getCreatedConnections().get(0).isClosed()
@@ -107,17 +111,17 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should throw if pool is exhausted'() throws InterruptedException {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).maxWaitTime(1, MILLISECONDS).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
-        def first = pool.get(new OperationContext())
+        def first = pool.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50)))
 
         then:
         first != null
 
         when:
-        pool.get(new OperationContext())
+        pool.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50)))
 
         then:
         thrown(MongoTimeoutException)
@@ -126,12 +130,14 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should throw on timeout'() throws InterruptedException {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).maxWaitTime(50, MILLISECONDS).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
-        pool.get(new OperationContext())
+
+        def timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(50)
+        pool.get(createOperationContext(timeoutSettings))
 
         when:
-        TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool)
+        TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool, timeoutSettings)
         new Thread(connectionGetter).start()
 
         connectionGetter.latch.await()
@@ -143,7 +149,7 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should have size of 0 with default settings'() {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(10).maintenanceInitialDelay(5, MINUTES).build(), mockSdamProvider())
+                builder().maxSize(10).maintenanceInitialDelay(5, MINUTES).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
@@ -157,7 +163,8 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should ensure min pool size after maintenance task runs'() {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(10).minSize(5).maintenanceInitialDelay(5, MINUTES).build(), mockSdamProvider())
+                builder().maxSize(10).minSize(5).maintenanceInitialDelay(5, MINUTES).build(),
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when: 'the maintenance tasks runs'
@@ -187,7 +194,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         def settings = builder().maxSize(10).minSize(5).addConnectionPoolListener(listener).build()
 
         when:
-        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider())
+        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         then:
         1 * listener.connectionPoolCreated { it.serverId == SERVER_ID && it.settings == settings }
@@ -197,7 +204,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         def settings = builder().maxSize(10).minSize(5).addConnectionPoolListener(listener).build()
-        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider())
+        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         when:
         pool.close()
 
@@ -209,11 +216,11 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         when:
         pool.ready()
-        pool.get(new OperationContext())
+        pool.get(OPERATION_CONTEXT)
 
         then:
         1 * listener.connectionCreated { it.connectionId.serverId == SERVER_ID }
@@ -234,7 +241,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         connection.opened() >> false
 
         when: 'connection pool is created'
-        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider())
+        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         then: '"pool is created" log message is emitted'
         def poolCreatedLogMessage = getMessage("Connection pool created")
         "Connection pool created for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()} using options " +
@@ -250,7 +257,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         "Connection pool ready for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}" == poolReadyLogMessage
 
         when: 'connection is created'
-        pool.get(new OperationContext())
+        pool.get(OPERATION_CONTEXT)
         then: '"connection created" and "connection ready" log messages are emitted'
         def createdLogMessage = getMessage( "Connection created")
         def readyLogMessage = getMessage("Connection ready")
@@ -260,7 +267,7 @@ class DefaultConnectionPoolSpecification extends Specification {
                 ", driver-generated ID=${driverConnectionId}, established in=\\d+ ms"
 
         when: 'connection is released back into the pool on close'
-        pool.get(new OperationContext()).close()
+        pool.get(OPERATION_CONTEXT).close()
         then: '"connection check out" and "connection checked in" log messages are emitted'
         def checkoutStartedMessage = getMessage("Connection checkout started")
         def connectionCheckedInMessage = getMessage("Connection checked in")
@@ -295,7 +302,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         "Connection pool closed for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}"  == poolClosedLogMessage
 
         when: 'connection checked out on closed pool'
-        pool.get(new OperationContext())
+        pool.get(OPERATION_CONTEXT)
         then:
         thrown(MongoServerUnavailableException)
         def connectionCheckoutFailedInMessage = getMessage("Connection checkout failed")
@@ -316,12 +323,14 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should log on checkout timeout fail'() throws InterruptedException {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).maxWaitTime(50, MILLISECONDS).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
-        pool.get(new OperationContext())
+
+        def timeoutSettings = ClusterFixture.TIMEOUT_SETTINGS.withMaxWaitTimeMS(50)
+        pool.get(createOperationContext(timeoutSettings))
 
         when:
-        TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool)
+        TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool, timeoutSettings)
         new Thread(connectionGetter).start()
         connectionGetter.latch.await()
 
@@ -337,11 +346,12 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should log on connection become idle'() {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(2).minSize(0).maxConnectionIdleTime(1, MILLISECONDS).build(), mockSdamProvider())
+                builder().maxSize(2).minSize(0).maxConnectionIdleTime(1, MILLISECONDS).build(),
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         when:
         pool.ready()
-        pool.get(new OperationContext()).close()
+        pool.get(OPERATION_CONTEXT).close()
         //not cool - but we have no way of waiting for connection to become idle
         Thread.sleep(500)
         pool.close();
@@ -362,7 +372,7 @@ class DefaultConnectionPoolSpecification extends Specification {
                 builder().maxSize(1)
                         .minSize(0)
                         .maxConnectionIdleTime(1, MILLISECONDS)
-                        .build(), EmptyProvider.instance())
+                        .build(), EmptyProvider.instance(), OPERATION_CONTEXT_FACTORY)
 
         when:
         pool.ready()
@@ -380,15 +390,15 @@ class DefaultConnectionPoolSpecification extends Specification {
         def connection = Mock(InternalConnection)
         connection.getDescription() >> new ConnectionDescription(SERVER_ID)
         connection.opened() >> false
-        connection.open() >> { throw new UncheckedIOException('expected failure', new IOException()) }
+        connection.open(OPERATION_CONTEXT) >> { throw new UncheckedIOException('expected failure', new IOException()) }
         connectionFactory.create(SERVER_ID, _) >> connection
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(),
-                mockSdamProvider())
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
         try {
-            pool.get(new OperationContext())
+            pool.get(OPERATION_CONTEXT)
         } catch (UncheckedIOException e) {
             if ('expected failure' != e.getMessage()) {
                 throw e
@@ -408,7 +418,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         when:
         pool.ready()
@@ -423,9 +433,9 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
-        def connection = pool.get(new OperationContext())
+        def connection = pool.get(OPERATION_CONTEXT)
         connection.close()
 
         when:
@@ -439,7 +449,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
         def connection = selectConnectionAsyncAndGet(pool)
         connection.close()
@@ -455,13 +465,13 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
-        def connection = pool.get(new OperationContext())
+        def connection = pool.get(OPERATION_CONTEXT)
         connection.close()
 
         when:
-        connection = pool.get(new OperationContext())
+        connection = pool.get(OPERATION_CONTEXT)
 
         then:
         1 * listener.connectionCheckedOut { it.connectionId.serverId == SERVER_ID }
@@ -477,13 +487,13 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
         def connection = selectConnectionAsyncAndGet(pool)
         connection.close()
 
         when:
-        connection = pool.get(new OperationContext())
+        connection = pool.get(OPERATION_CONTEXT)
 
         then:
         1 * listener.connectionCheckedOut { it.connectionId.serverId == SERVER_ID }
@@ -501,15 +511,15 @@ class DefaultConnectionPoolSpecification extends Specification {
         def connection = Mock(InternalConnection)
         connection.getDescription() >> new ConnectionDescription(SERVER_ID)
         connection.opened() >> false
-        connection.open() >> { throw new UncheckedIOException('expected failure', new IOException()) }
+        connection.open(OPERATION_CONTEXT) >> { throw new UncheckedIOException('expected failure', new IOException()) }
         connectionFactory.create(SERVER_ID, _) >> connection
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(),
-                mockSdamProvider())
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
         try {
-            pool.get(new OperationContext())
+            pool.get(OPERATION_CONTEXT)
         } catch (UncheckedIOException e) {
             if ('expected failure' != e.getMessage()) {
                 throw e
@@ -526,12 +536,12 @@ class DefaultConnectionPoolSpecification extends Specification {
         def connection = Mock(InternalConnection)
         connection.getDescription() >> new ConnectionDescription(SERVER_ID)
         connection.opened() >> false
-        connection.openAsync(_) >> { SingleResultCallback<Void> callback ->
-            callback.onResult(null, new UncheckedIOException('expected failure', new IOException()))
+        connection.openAsync(_, _) >> {
+            it.last().onResult(null, new UncheckedIOException('expected failure', new IOException()))
         }
         connectionFactory.create(SERVER_ID, _) >> connection
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(),
-                mockSdamProvider())
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
@@ -549,12 +559,12 @@ class DefaultConnectionPoolSpecification extends Specification {
 
     def 'should fire MongoConnectionPoolClearedException when checking out in paused state'() {
         given:
-        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider())
+        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         Throwable caught = null
 
         when:
         try {
-            pool.get(new OperationContext())
+            pool.get(OPERATION_CONTEXT)
         } catch (MongoConnectionPoolClearedException e) {
             caught = e
         }
@@ -565,11 +575,11 @@ class DefaultConnectionPoolSpecification extends Specification {
 
     def 'should fire MongoConnectionPoolClearedException when checking out asynchronously in paused state'() {
         given:
-        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider())
+        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         CompletableFuture<Throwable> caught = new CompletableFuture<>()
 
         when:
-        pool.getAsync(new OperationContext()) { InternalConnection result, Throwable t ->
+        pool.getAsync(OPERATION_CONTEXT) { InternalConnection result, Throwable t ->
             if (t != null) {
                 caught.complete(t)
             }
@@ -582,14 +592,14 @@ class DefaultConnectionPoolSpecification extends Specification {
 
     def 'invalidate should record cause'() {
         given:
-        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider())
+        pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         RuntimeException cause = new RuntimeException()
         Throwable caught = null
 
         when:
         pool.invalidate(cause)
         try {
-            pool.get(new OperationContext())
+            pool.get(OPERATION_CONTEXT)
         } catch (MongoConnectionPoolClearedException e) {
             caught = e
         }
@@ -602,7 +612,7 @@ class DefaultConnectionPoolSpecification extends Specification {
         given:
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(),
-                mockSdamProvider())
+                mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         when:
         pool.ready()
@@ -618,9 +628,9 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should continue to fire events after pool is closed'() {
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
-        def connection = pool.get(new OperationContext())
+        def connection = pool.get(OPERATION_CONTEXT)
         pool.close()
 
         when:
@@ -634,7 +644,7 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should continue to fire events after pool is closed (asynchronous)'() {
         def listener = Mock(ConnectionPoolListener)
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1)
-                .addConnectionPoolListener(listener).build(), mockSdamProvider())
+                .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
         def connection = selectConnectionAsyncAndGet(pool)
         pool.close()
@@ -650,7 +660,7 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should select connection asynchronously if one is immediately available'() {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         expect:
@@ -660,11 +670,11 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'should select connection asynchronously if one is not immediately available'() {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
 
         when:
-        def connection = pool.get(new OperationContext())
+        def connection = pool.get(OPERATION_CONTEXT)
         def connectionLatch = selectConnectionAsync(pool)
         connection.close()
 
@@ -675,9 +685,9 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'when getting a connection asynchronously should send MongoTimeoutException to callback after timeout period'() {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).maxWaitTime(5, MILLISECONDS).build(), mockSdamProvider())
+                builder().maxSize(1).maxWaitTime(5, MILLISECONDS).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.ready()
-        pool.get(new OperationContext())
+        pool.get(OPERATION_CONTEXT)
         def firstConnectionLatch = selectConnectionAsync(pool)
         def secondConnectionLatch = selectConnectionAsync(pool)
 
@@ -697,7 +707,7 @@ class DefaultConnectionPoolSpecification extends Specification {
     def 'invalidate should do nothing when pool is closed'() {
         given:
         pool = new DefaultConnectionPool(SERVER_ID, connectionFactory,
-                builder().maxSize(1).build(), mockSdamProvider())
+                builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         pool.close()
 
         when:
@@ -713,7 +723,7 @@ class DefaultConnectionPoolSpecification extends Specification {
 
     def selectConnectionAsync(DefaultConnectionPool pool) {
         def serverLatch = new ConnectionLatch()
-        pool.getAsync(new OperationContext()) { InternalConnection result, Throwable e ->
+        pool.getAsync(OPERATION_CONTEXT) { InternalConnection result, Throwable e ->
             serverLatch.connection = result
             serverLatch.throwable = e
             serverLatch.latch.countDown()
@@ -742,5 +752,4 @@ class DefaultConnectionPoolSpecification extends Specification {
             connection
         }
     }
-
 }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy
index eb27b23fdfb..5b894c7a735 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy
@@ -16,42 +16,25 @@
 
 package com.mongodb.internal.connection
 
-import com.mongodb.MongoNamespace
+
 import com.mongodb.ReadPreference
-import com.mongodb.ServerAddress
 import com.mongodb.connection.ClusterConnectionMode
-import com.mongodb.connection.ClusterId
-import com.mongodb.connection.ConnectionDescription
-import com.mongodb.connection.ConnectionId
-import com.mongodb.connection.ServerId
-import com.mongodb.internal.IgnorableRequestContext
 import com.mongodb.internal.async.SingleResultCallback
-import com.mongodb.internal.binding.StaticBindingContext
 import com.mongodb.internal.diagnostics.logging.Logger
 import com.mongodb.internal.validator.NoOpFieldNameValidator
 import org.bson.BsonDocument
 import org.bson.BsonInt32
 import org.bson.codecs.BsonDocumentCodec
-import spock.lang.Shared
 import spock.lang.Specification
 
-import static com.mongodb.ClusterFixture.getServerApi
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.CustomMatchers.compare
-import static com.mongodb.connection.ServerType.SHARD_ROUTER
-import static com.mongodb.connection.ServerType.STANDALONE
 import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback
 import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER
 
 class DefaultServerConnectionSpecification extends Specification {
-    def namespace = new MongoNamespace('test', 'test')
     def internalConnection = Mock(InternalConnection)
     def callback = errorHandlingCallback(Mock(SingleResultCallback), Mock(Logger))
-    @Shared
-    def standaloneConnectionDescription = new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
-            3, STANDALONE, 100, 100, 100, [])
-    @Shared
-    def mongosConnectionDescription = new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
-            3, SHARD_ROUTER, 100, 100, 100, [])
 
     def 'should execute command protocol asynchronously'() {
         given:
@@ -60,16 +43,14 @@ class DefaultServerConnectionSpecification extends Specification {
         def codec = new BsonDocumentCodec()
         def executor = Mock(ProtocolExecutor)
         def connection = new DefaultServerConnection(internalConnection, executor, ClusterConnectionMode.MULTIPLE)
-        def operationContext = new OperationContext()
-        def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, getServerApi(), IgnorableRequestContext.INSTANCE,
-                operationContext)
+
         when:
-        connection.commandAsync('test', command, validator, ReadPreference.primary(), codec, context, callback)
+        connection.commandAsync('test', command, validator, ReadPreference.primary(), codec, OPERATION_CONTEXT, callback)
 
         then:
         1 * executor.executeAsync({
             compare(new CommandProtocolImpl('test', command, validator, ReadPreference.primary(), codec, true, null, null,
-                    ClusterConnectionMode.MULTIPLE, getServerApi(), IgnorableRequestContext.INSTANCE, operationContext), it)
-        }, internalConnection, NoOpSessionContext.INSTANCE, callback)
+                    ClusterConnectionMode.MULTIPLE, OPERATION_CONTEXT), it)
+        }, internalConnection, OPERATION_CONTEXT.getSessionContext(), callback)
     }
 }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy
index 42626a46d9c..c452d757a28 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy
@@ -39,6 +39,7 @@ import java.nio.ByteBuffer
 import java.util.concurrent.CountDownLatch
 import java.util.concurrent.TimeUnit
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY
 import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER
 
 @SuppressWarnings('BusyWait')
@@ -79,12 +80,14 @@ class DefaultServerMonitorSpecification extends Specification {
         def internalConnectionFactory = Mock(InternalConnectionFactory) {
             create(_) >> {
                 Mock(InternalConnection) {
-                    open() >> { sleep(100) }
+                    open(_) >> { sleep(100) }
                 }
             }
         }
         monitor = new DefaultServerMonitor(new ServerId(new ClusterId(), new ServerAddress()), ServerSettings.builder().build(),
-                internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, SameObjectProvider.initialized(sdam))
+                internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, SameObjectProvider.initialized(sdam),
+                OPERATION_CONTEXT_FACTORY)
+
         monitor.start()
 
         when:
@@ -143,7 +146,7 @@ class DefaultServerMonitorSpecification extends Specification {
         def internalConnectionFactory = Mock(InternalConnectionFactory) {
             create(_) >> {
                 Mock(InternalConnection) {
-                    open() >> { }
+                    open(_) >> { }
 
                     getBuffer(_) >> { int size ->
                         new ByteBufNIO(ByteBuffer.allocate(size))
@@ -167,7 +170,7 @@ class DefaultServerMonitorSpecification extends Specification {
         }
         monitor = new DefaultServerMonitor(new ServerId(new ClusterId(), new ServerAddress()),
                 ServerSettings.builder().heartbeatFrequency(1, TimeUnit.SECONDS).addServerMonitorListener(serverMonitorListener).build(),
-                internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider())
+                internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         when:
         monitor.start()
@@ -222,7 +225,7 @@ class DefaultServerMonitorSpecification extends Specification {
         def internalConnectionFactory = Mock(InternalConnectionFactory) {
             create(_) >> {
                 Mock(InternalConnection) {
-                    open() >> { }
+                    open(_) >> { }
 
                     getBuffer(_) >> { int size ->
                         new ByteBufNIO(ByteBuffer.allocate(size))
@@ -246,7 +249,7 @@ class DefaultServerMonitorSpecification extends Specification {
         }
         monitor = new DefaultServerMonitor(new ServerId(new ClusterId(), new ServerAddress()),
                 ServerSettings.builder().heartbeatFrequency(1, TimeUnit.SECONDS).addServerMonitorListener(serverMonitorListener).build(),
-                internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider())
+                internalConnectionFactory, ClusterConnectionMode.SINGLE, null, false, mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         when:
         monitor.start()
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy
index a0b96706f0e..f054457b877 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy
@@ -36,11 +36,11 @@ import com.mongodb.connection.ServerType
 import com.mongodb.event.CommandListener
 import com.mongodb.event.ServerDescriptionChangedEvent
 import com.mongodb.event.ServerListener
-import com.mongodb.internal.IgnorableRequestContext
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.async.SingleResultCallback
-import com.mongodb.internal.binding.StaticBindingContext
 import com.mongodb.internal.inject.SameObjectProvider
 import com.mongodb.internal.session.SessionContext
+import com.mongodb.internal.time.Timeout
 import com.mongodb.internal.validator.NoOpFieldNameValidator
 import org.bson.BsonDocument
 import org.bson.BsonInt32
@@ -50,7 +50,7 @@ import spock.lang.Specification
 
 import java.util.concurrent.CountDownLatch
 
-import static com.mongodb.ClusterFixture.getServerApi
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.MongoCredential.createCredential
 import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
 import static com.mongodb.connection.ClusterConnectionMode.SINGLE
@@ -71,7 +71,7 @@ class DefaultServerSpecification extends Specification {
                 Mock(SdamServerDescriptionManager), Mock(ServerListener), Mock(CommandListener), new ClusterClock(), false)
 
         when:
-        def receivedConnection = server.getConnection(new OperationContext())
+        def receivedConnection = server.getConnection(OPERATION_CONTEXT)
 
         then:
         receivedConnection
@@ -97,7 +97,7 @@ class DefaultServerSpecification extends Specification {
 
         when:
         def callback = new SupplyingCallback<AsyncConnection>()
-        server.getConnectionAsync(new OperationContext(), callback)
+        server.getConnectionAsync(OPERATION_CONTEXT, callback)
 
         then:
         callback.get() == connection
@@ -114,7 +114,7 @@ class DefaultServerSpecification extends Specification {
         server.close()
 
         when:
-        server.getConnection(new OperationContext())
+        server.getConnection(OPERATION_CONTEXT)
 
         then:
         def ex = thrown(MongoServerUnavailableException)
@@ -124,7 +124,7 @@ class DefaultServerSpecification extends Specification {
         def latch = new CountDownLatch(1)
         def receivedConnection = null
         def receivedThrowable = null
-        server.getConnectionAsync(new OperationContext()) {
+        server.getConnectionAsync(OPERATION_CONTEXT) {
             result, throwable ->
                 receivedConnection = result; receivedThrowable = throwable; latch.countDown()
         }
@@ -166,7 +166,7 @@ class DefaultServerSpecification extends Specification {
         given:
         def connectionPool = Mock(ConnectionPool)
         def serverMonitor = Mock(ServerMonitor)
-        connectionPool.get(new OperationContext()) >> { throw exceptionToThrow }
+        connectionPool.get(OPERATION_CONTEXT) >> { throw exceptionToThrow }
 
         def server = defaultServer(connectionPool, serverMonitor)
         server.close()
@@ -187,7 +187,7 @@ class DefaultServerSpecification extends Specification {
         def server = defaultServer(connectionPool, serverMonitor)
 
         when:
-        server.getConnection(new OperationContext())
+        server.getConnection(OPERATION_CONTEXT)
 
         then:
         def e = thrown(MongoException)
@@ -212,7 +212,7 @@ class DefaultServerSpecification extends Specification {
         def server = defaultServer(connectionPool, serverMonitor)
 
         when:
-        server.getConnection(new OperationContext())
+        server.getConnection(OPERATION_CONTEXT)
 
         then:
         def e = thrown(MongoSecurityException)
@@ -237,7 +237,7 @@ class DefaultServerSpecification extends Specification {
         def latch = new CountDownLatch(1)
         def receivedConnection = null
         def receivedThrowable = null
-        server.getConnectionAsync(new OperationContext()) {
+        server.getConnectionAsync(OPERATION_CONTEXT) {
             result, throwable ->
                 receivedConnection = result; receivedThrowable = throwable; latch.countDown()
         }
@@ -270,7 +270,7 @@ class DefaultServerSpecification extends Specification {
         def latch = new CountDownLatch(1)
         def receivedConnection = null
         def receivedThrowable = null
-        server.getConnectionAsync(new OperationContext()) {
+        server.getConnectionAsync(OPERATION_CONTEXT) {
             result, throwable ->
                 receivedConnection = result; receivedThrowable = throwable; latch.countDown()
         }
@@ -306,19 +306,19 @@ class DefaultServerSpecification extends Specification {
                           ''')
         def protocol = new TestCommandProtocol(response)
         testConnection.enqueueProtocol(protocol)
-        def context = new StaticBindingContext(sessionContext, getServerApi(), IgnorableRequestContext.INSTANCE, new OperationContext())
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
 
         when:
         if (async) {
             CountDownLatch latch = new CountDownLatch(1)
             testConnection.commandAsync('admin', new BsonDocument('ping', new BsonInt32(1)), NO_OP_FIELD_NAME_VALIDATOR,
-                    ReadPreference.primary(), new BsonDocumentCodec(), context) {
+                    ReadPreference.primary(), new BsonDocumentCodec(), operationContext) {
                 BsonDocument result, Throwable t -> latch.countDown()
             }
             latch.await()
         } else {
             testConnection.command('admin', new BsonDocument('ping', new BsonInt32(1)), NO_OP_FIELD_NAME_VALIDATOR,
-                    ReadPreference.primary(), new BsonDocumentCodec(), context)
+                    ReadPreference.primary(), new BsonDocumentCodec(), operationContext)
         }
 
         then:
@@ -379,7 +379,7 @@ class DefaultServerSpecification extends Specification {
         }
 
         @Override
-        TestCommandProtocol sessionContext(final SessionContext sessionContext) {
+        TestCommandProtocol withSessionContext(final SessionContext sessionContext) {
             contextClusterTime = sessionContext.clusterTime
             sessionContext.advanceClusterTime(responseDocument.getDocument('$clusterTime'))
             sessionContext.advanceOperationTime(responseDocument.getTimestamp('operationTime'))
@@ -394,7 +394,7 @@ class DefaultServerSpecification extends Specification {
             }
 
             @Override
-            Cluster.ServersSnapshot getServersSnapshot() {
+            Cluster.ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) {
                 Cluster.ServersSnapshot result = {
                     serverAddress -> throw new UnsupportedOperationException()
                 }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy
index 921e9fc045b..802cf044aac 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy
@@ -36,8 +36,8 @@ import java.util.concurrent.TimeUnit
 
 import static com.mongodb.internal.connection.DescriptionHelper.createConnectionDescription
 import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription
-import static org.bson.BsonDocument.parse
 import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER
+import static org.bson.BsonDocument.parse
 
 class DescriptionHelperSpecification extends Specification {
     private final ServerAddress serverAddress = new ServerAddress('localhost', 27018)
@@ -150,7 +150,7 @@ class DescriptionHelperSpecification extends Specification {
     def 'server description should reflect not ok legacy hello result'() {
         expect:
         createServerDescription(serverAddress,
-                                parse('{ok : 0}'), roundTripTime) ==
+                                parse('{ok : 0}'), roundTripTime, 0) ==
                 ServerDescription.builder()
                          .ok(false)
                          .address(serverAddress)
@@ -162,7 +162,7 @@ class DescriptionHelperSpecification extends Specification {
     def 'server description should reflect last update time'() {
         expect:
         createServerDescription(serverAddress,
-                parse('{ ok : 1 }'), roundTripTime).getLastUpdateTime(TimeUnit.NANOSECONDS) == Time.CONSTANT_TIME
+                parse('{ ok : 1 }'), roundTripTime, 0).getLastUpdateTime(TimeUnit.NANOSECONDS) == Time.CONSTANT_TIME
     }
 
     def 'server description should reflect roundTripNanos'() {
@@ -177,7 +177,7 @@ class DescriptionHelperSpecification extends Specification {
                                       maxWireVersion : 3,
                                       minWireVersion : 0,
                                       ok : 1
-                                      }"""), roundTripTime).roundTripTimeNanos ==
+                                      }"""), roundTripTime, 0).roundTripTimeNanos ==
         ServerDescription.builder()
                          .ok(true)
                          .address(serverAddress)
@@ -201,7 +201,7 @@ class DescriptionHelperSpecification extends Specification {
                         maxWireVersion : 3,
                         minWireVersion : 0,
                         ok : 1
-                        }"""), roundTripTime) ==
+                        }"""), roundTripTime, 0) ==
         ServerDescription.builder()
                          .ok(true)
                          .address(serverAddress)
@@ -235,7 +235,7 @@ class DescriptionHelperSpecification extends Specification {
                         "maxWireVersion" : 3,
                         "minWireVersion" : 0,
                         "ok" : 1
-                        }"""), roundTripTime) ==
+                        }"""), roundTripTime, 0) ==
         ServerDescription.builder()
                          .ok(true)
                          .address(new ServerAddress('localhost', 27018))
@@ -274,7 +274,7 @@ class DescriptionHelperSpecification extends Specification {
                         "minWireVersion" : 0,
                         "lastWrite" : { "lastWriteDate" : ISODate("2016-03-04T23:14:07.338Z") }
                         "ok" : 1
-                        }"""), roundTripTime) ==
+                        }"""), roundTripTime, 0) ==
                 ServerDescription.builder()
                         .ok(true)
                         .address(new ServerAddress('localhost', 27018))
@@ -326,7 +326,7 @@ class DescriptionHelperSpecification extends Specification {
                         "setVersion" : 2,
                         tags : { "dc" : "east", "use" : "production" }
                         "ok" : 1
-                        }"""), roundTripTime)
+                        }"""), roundTripTime, 0)
 
         then:
         serverDescription ==
@@ -374,7 +374,7 @@ class DescriptionHelperSpecification extends Specification {
                         "maxWireVersion" : 3,
                         "minWireVersion" : 0,
                         "ok" : 1
-                        }"""), roundTripTime) ==
+                        }"""), roundTripTime, 0) ==
         ServerDescription.builder()
                          .ok(true)
                          .address(serverAddress)
@@ -418,7 +418,7 @@ class DescriptionHelperSpecification extends Specification {
                         "maxWireVersion" : 3,
                         "minWireVersion" : 0,
                         "ok" : 1
-                        }"""), roundTripTime)
+                        }"""), roundTripTime, 0)
 
         then:
         serverDescription ==
@@ -466,7 +466,7 @@ class DescriptionHelperSpecification extends Specification {
                         "maxWireVersion" : 3,
                         "minWireVersion" : 0,
                         "ok" : 1
-                        }"""), roundTripTime) ==
+                        }"""), roundTripTime, 0) ==
         ServerDescription.builder()
                          .ok(true)
                          .address(serverAddressOfHidden)
@@ -499,7 +499,7 @@ class DescriptionHelperSpecification extends Specification {
                         "maxWireVersion" : 3,
                         "minWireVersion" : 0,
                         "ok" : 1
-                        }"""), roundTripTime) ==
+                        }"""), roundTripTime, 0) ==
         ServerDescription.builder()
                          .ok(true)
                          .address(serverAddress)
@@ -525,7 +525,7 @@ class DescriptionHelperSpecification extends Specification {
                         "maxWireVersion" : 3,
                         "minWireVersion" : 0,
                         "ok" : 1
-                        }"""), roundTripTime) ==
+                        }"""), roundTripTime, 0) ==
         ServerDescription.builder()
                          .ok(true)
                          .address(serverAddress)
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageSpecification.groovy
deleted file mode 100644
index 514499c86b4..00000000000
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageSpecification.groovy
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2008-present MongoDB, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb.internal.connection
-
-import spock.lang.Specification
-
-
-class ExponentiallyWeightedMovingAverageSpecification extends Specification {
-
-    def 'constructor should throw if alpha is not between 0.0 and 1.0'() {
-        when:
-        new ExponentiallyWeightedMovingAverage(alpha)
-
-        then:
-        thrown(IllegalArgumentException)
-
-        where:
-        alpha << [-0.001, -0.01, -0.1, -1, 1.001, 1.01, 1.1]
-    }
-
-    def 'constructor should not throw if alpha is between 0.0 and 1.0'() {
-        when:
-        new ExponentiallyWeightedMovingAverage(alpha)
-
-        then:
-        true
-
-        where:
-        alpha << [-0.0, 0.01, 0.1, 0.001, 0.01, 0.1, 0.2, 1.0]
-    }
-
-    def 'the average should be exponentially weighted'() {
-        when:
-        def average = new ExponentiallyWeightedMovingAverage(alpha)
-        for (def sample : samples) {
-            average.addSample(sample)
-        }
-
-        then:
-        average.getAverage() == result
-
-        where:
-        alpha << [0.2, 0.2, 0.2, 0.2, 0.2]
-        samples << [[], [10], [10, 20], [10, 20, 12], [10, 20, 12, 17]]
-        result << [0, 10, 12, 12, 13]
-    }
-}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java
new file mode 100644
index 00000000000..59da49bfbe5
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.connection;
+
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.util.List;
+import java.util.stream.Stream;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+
+public class ExponentiallyWeightedMovingAverageTest {
+
+    @ParameterizedTest(name = "{index}: {0}")
+    @ValueSource(doubles = {-0.001, -0.01, -0.1, -1, 1.001, 1.01, 1.1})
+    @DisplayName("constructor should throw if alpha is not between 0.0 and 1.0")
+    void testInvalidAlpha(final double alpha) {
+        assertThrows(IllegalArgumentException.class, () -> new ExponentiallyWeightedMovingAverage(alpha));
+    }
+
+    @ParameterizedTest(name = "{index}: {0}")
+    @ValueSource(doubles = {-0.0, 0.01, 0.1, 0.001, 0.01, 0.1, 0.2, 1.0})
+    @DisplayName("constructor should not throw if alpha is between 0.0 and 1.0")
+    void testValidAlpha(final double alpha) {
+        assertDoesNotThrow(() -> new ExponentiallyWeightedMovingAverage(alpha));
+    }
+
+
+    @ParameterizedTest(name = "{index}: samples: {1}. Expected: {2}")
+    @DisplayName("the average should be exponentially weighted")
+    @MethodSource
+    public void testAverageIsExponentiallyWeighted(final double alpha, final List<Integer> samples, final int expectedAverageRTT) {
+        ExponentiallyWeightedMovingAverage average = new ExponentiallyWeightedMovingAverage(alpha);
+        samples.forEach(average::addSample);
+
+        assertEquals(expectedAverageRTT, average.getAverage());
+    }
+
+    private static Stream<Arguments> testAverageIsExponentiallyWeighted() {
+        return Stream.of(
+                Arguments.of(0.2, emptyList(), 0),
+                Arguments.of(0.2, singletonList(10), 10),
+                Arguments.of(0.2, asList(10, 20), 12),
+                Arguments.of(0.2, asList(10, 20, 12), 12),
+                Arguments.of(0.2, asList(10, 20, 12, 17), 13)
+        );
+    }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy
index c389e647be1..93bc656226a 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy
@@ -27,6 +27,7 @@ import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerId
 import com.mongodb.connection.ServerType
+import com.mongodb.internal.TimeoutSettings
 import org.bson.BsonArray
 import org.bson.BsonBoolean
 import org.bson.BsonDocument
@@ -47,11 +48,13 @@ import static com.mongodb.internal.connection.ClientMetadataHelperProseTest.crea
 import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO
 import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply
 import static com.mongodb.internal.connection.MessageHelper.decodeCommand
+import static com.mongodb.internal.connection.OperationContext.simpleOperationContext
 
 class InternalStreamConnectionInitializerSpecification extends Specification {
 
     def serverId = new ServerId(new ClusterId(), new ServerAddress())
     def internalConnection = new TestInternalConnection(serverId, ServerType.STANDALONE)
+    def operationContext = simpleOperationContext(TimeoutSettings.DEFAULT, null)
 
     def 'should create correct description'() {
         given:
@@ -59,8 +62,8 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
 
         when:
         enqueueSuccessfulReplies(false, null)
-        def description = initializer.startHandshake(internalConnection)
-        description = initializer.finishHandshake(internalConnection, description)
+        def description = initializer.startHandshake(internalConnection, operationContext)
+        description = initializer.finishHandshake(internalConnection, description, operationContext)
         def connectionDescription = description.connectionDescription
         def serverDescription = description.serverDescription
 
@@ -76,10 +79,10 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         when:
         enqueueSuccessfulReplies(false, null)
         def futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.startHandshakeAsync(internalConnection, futureCallback)
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
         def description = futureCallback.get()
         futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.finishHandshakeAsync(internalConnection, description, futureCallback)
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
         description = futureCallback.get()
         def connectionDescription = description.connectionDescription
         def serverDescription = description.serverDescription
@@ -95,8 +98,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
 
         when:
         enqueueSuccessfulReplies(false, 123)
-        def internalDescription = initializer.startHandshake(internalConnection)
-        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription
+        def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+                .connectionDescription
 
         then:
         connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123)
@@ -108,8 +112,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
 
         when:
         enqueueSuccessfulRepliesWithConnectionIdIsHelloResponse(false, 123)
-        def internalDescription = initializer.startHandshake(internalConnection)
-        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription
+        def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+                .connectionDescription
 
         then:
         connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123)
@@ -122,10 +127,10 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         when:
         enqueueSuccessfulReplies(false, 123)
         def futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.startHandshakeAsync(internalConnection, futureCallback)
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
         def description = futureCallback.get()
         futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.finishHandshakeAsync(internalConnection, description, futureCallback)
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
         def connectionDescription = futureCallback.get().connectionDescription
 
         then:
@@ -139,10 +144,10 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         when:
         enqueueSuccessfulRepliesWithConnectionIdIsHelloResponse(false, 123)
         def futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.startHandshakeAsync(internalConnection, futureCallback)
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
         def description = futureCallback.get()
         futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.finishHandshakeAsync(internalConnection, description, futureCallback)
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
         description = futureCallback.get()
         def connectionDescription = description.connectionDescription
 
@@ -158,12 +163,13 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         when:
         enqueueSuccessfulReplies(false, null)
 
-        def internalDescription = initializer.startHandshake(internalConnection)
-        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription
+        def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+                .connectionDescription
 
         then:
         connectionDescription
-        1 * firstAuthenticator.authenticate(internalConnection, _)
+        1 * firstAuthenticator.authenticate(internalConnection, _, _)
     }
 
     def 'should authenticate asynchronously'() {
@@ -175,15 +181,15 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         enqueueSuccessfulReplies(false, null)
 
         def futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.startHandshakeAsync(internalConnection, futureCallback)
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
         def description = futureCallback.get()
         futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.finishHandshakeAsync(internalConnection, description, futureCallback)
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
         def connectionDescription = futureCallback.get().connectionDescription
 
         then:
         connectionDescription
-        1 * authenticator.authenticateAsync(internalConnection, _, _) >> { it[2].onResult(null, null) }
+        1 * authenticator.authenticateAsync(internalConnection, _, _, _) >> { it[3].onResult(null, null) }
     }
 
     def 'should not authenticate if server is an arbiter'() {
@@ -194,12 +200,13 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         when:
         enqueueSuccessfulReplies(true, null)
 
-        def internalDescription = initializer.startHandshake(internalConnection)
-        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription).connectionDescription
+        def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+                .connectionDescription
 
         then:
         connectionDescription
-        0 * authenticator.authenticate(internalConnection, _)
+        0 * authenticator.authenticate(internalConnection, _, _)
     }
 
     def 'should not authenticate asynchronously if server is an arbiter asynchronously'() {
@@ -211,10 +218,10 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         enqueueSuccessfulReplies(true, null)
 
         def futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.startHandshakeAsync(internalConnection, futureCallback)
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
         def description = futureCallback.get()
         futureCallback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-        initializer.finishHandshakeAsync(internalConnection, description, futureCallback)
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
         def connectionDescription = futureCallback.get().connectionDescription
 
         then:
@@ -236,14 +243,14 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         enqueueSuccessfulReplies(false, null)
         if (async) {
             def callback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-            initializer.startHandshakeAsync(internalConnection, callback)
+            initializer.startHandshakeAsync(internalConnection, operationContext, callback)
             def description = callback.get()
             callback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-            initializer.finishHandshakeAsync(internalConnection, description, callback)
+            initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback)
             callback.get()
         } else {
-            def internalDescription = initializer.startHandshake(internalConnection)
-            initializer.finishHandshake(internalConnection, internalDescription)
+            def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+            initializer.finishHandshake(internalConnection, internalDescription, operationContext)
         }
 
         then:
@@ -273,14 +280,14 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         enqueueSuccessfulReplies(false, null)
         if (async) {
             def callback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-            initializer.startHandshakeAsync(internalConnection, callback)
+            initializer.startHandshakeAsync(internalConnection, operationContext, callback)
             def description = callback.get()
             callback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-            initializer.finishHandshakeAsync(internalConnection, description, callback)
+            initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback)
             callback.get()
         } else {
-            def internalDescription = initializer.startHandshake(internalConnection)
-            initializer.finishHandshake(internalConnection, internalDescription)
+            def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+            initializer.finishHandshake(internalConnection, internalDescription, operationContext)
         }
 
         then:
@@ -312,9 +319,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         then:
         description
         if (async) {
-            1 * scramShaAuthenticator.authenticateAsync(internalConnection, _, _)
+            1 * scramShaAuthenticator.authenticateAsync(internalConnection, _, _, _)
         } else {
-            1 * scramShaAuthenticator.authenticate(internalConnection, _)
+            1 * scramShaAuthenticator.authenticate(internalConnection, _, _)
         }
         1 * ((SpeculativeAuthenticator) scramShaAuthenticator).createSpeculativeAuthenticateCommand(_)
         ((SpeculativeAuthenticator) scramShaAuthenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse
@@ -343,9 +350,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         then:
         description
         if (async) {
-            1 * authenticator.authenticateAsync(internalConnection, _, _)
+            1 * authenticator.authenticateAsync(internalConnection, _, _, _)
         } else {
-            1 * authenticator.authenticate(internalConnection, _)
+            1 * authenticator.authenticate(internalConnection, _, _)
         }
         1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_)
         ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse
@@ -374,9 +381,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         then:
         description
         if (async) {
-            1 * authenticator.authenticateAsync(internalConnection, _, _)
+            1 * authenticator.authenticateAsync(internalConnection, _, _, _)
         } else {
-            1 * authenticator.authenticate(internalConnection, _)
+            1 * authenticator.authenticate(internalConnection, _, _)
         }
         1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_)
         ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse
@@ -402,9 +409,9 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
         then:
         description
         if (async) {
-            1 * authenticator.authenticateAsync(internalConnection, _, _)
+            1 * authenticator.authenticateAsync(internalConnection, _, _, _)
         } else {
-            1 * authenticator.authenticate(internalConnection, _)
+            1 * authenticator.authenticate(internalConnection, _, _)
         }
         1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_)
         ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse
@@ -444,14 +451,14 @@ class InternalStreamConnectionInitializerSpecification extends Specification {
                              final TestInternalConnection connection) {
         if (async) {
             def callback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-            initializer.startHandshakeAsync(internalConnection, callback)
+            initializer.startHandshakeAsync(internalConnection, operationContext, callback)
             def description = callback.get()
             callback = new FutureResultCallback<InternalConnectionInitializationDescription>()
-            initializer.finishHandshakeAsync(internalConnection, description, callback)
+            initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback)
             callback.get()
         } else {
-            def internalDescription = initializer.startHandshake(connection)
-            initializer.finishHandshake(connection, internalDescription)
+            def internalDescription = initializer.startHandshake(connection, operationContext)
+            initializer.finishHandshake(connection, internalDescription, operationContext)
         }
     }
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy
index c0cd580e02e..7a0dca34526 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy
@@ -20,14 +20,15 @@ import com.mongodb.MongoCommandException
 import com.mongodb.MongoInternalException
 import com.mongodb.MongoInterruptedException
 import com.mongodb.MongoNamespace
+import com.mongodb.MongoOperationTimeoutException
 import com.mongodb.MongoSocketClosedException
 import com.mongodb.MongoSocketException
 import com.mongodb.MongoSocketReadException
+import com.mongodb.MongoSocketReadTimeoutException
 import com.mongodb.MongoSocketWriteException
 import com.mongodb.ReadConcern
 import com.mongodb.ServerAddress
 import com.mongodb.async.FutureResultCallback
-import com.mongodb.connection.AsyncCompletionHandler
 import com.mongodb.connection.ClusterId
 import com.mongodb.connection.ConnectionDescription
 import com.mongodb.connection.ConnectionId
@@ -39,14 +40,13 @@ import com.mongodb.event.CommandFailedEvent
 import com.mongodb.event.CommandStartedEvent
 import com.mongodb.event.CommandSucceededEvent
 import com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils
-import com.mongodb.internal.IgnorableRequestContext
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.session.SessionContext
 import com.mongodb.internal.validator.NoOpFieldNameValidator
 import org.bson.BsonDocument
 import org.bson.BsonInt32
 import org.bson.BsonReader
 import org.bson.BsonString
-import org.bson.ByteBuf
 import org.bson.ByteBufNIO
 import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.DecoderContext
@@ -59,6 +59,8 @@ import java.util.concurrent.CountDownLatch
 import java.util.concurrent.ExecutorService
 import java.util.concurrent.Executors
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
 import static com.mongodb.connection.ClusterConnectionMode.SINGLE
@@ -94,16 +96,16 @@ class InternalStreamConnectionSpecification extends Specification {
     def internalConnectionInitializationDescription =
             new InternalConnectionInitializationDescription(connectionDescription, serverDescription)
     def stream = Mock(Stream) {
-        openAsync(_) >> { it[0].completed(null) }
+        openAsync(_, _) >> { it.last().completed(null) }
     }
     def streamFactory = Mock(StreamFactory) {
         create(_) >> { stream }
     }
     def initializer = Mock(InternalConnectionInitializer) {
-        startHandshake(_) >> { internalConnectionInitializationDescription }
-        finishHandshake(_, _) >> { internalConnectionInitializationDescription }
-        startHandshakeAsync(_, _) >> { it[1].onResult(internalConnectionInitializationDescription, null) }
-        finishHandshakeAsync(_, _, _) >> { it[2].onResult(internalConnectionInitializationDescription, null) }
+        startHandshake(_, _) >> { internalConnectionInitializationDescription }
+        finishHandshake(_, _, _) >> { internalConnectionInitializationDescription }
+        startHandshakeAsync(_, _, _) >> { it[2].onResult(internalConnectionInitializationDescription, null) }
+        finishHandshakeAsync(_, _, _, _) >> { it[3].onResult(internalConnectionInitializationDescription, null) }
     }
 
     def getConnection() {
@@ -113,7 +115,7 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def getOpenedConnection() {
         def connection = getConnection()
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
         connection
     }
 
@@ -131,7 +133,7 @@ class InternalStreamConnectionSpecification extends Specification {
                 .lastUpdateTimeNanos(connection.getInitialServerDescription().getLastUpdateTime(NANOSECONDS))
                 .build()
         when:
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
 
         then:
         connection.opened()
@@ -158,7 +160,7 @@ class InternalStreamConnectionSpecification extends Specification {
                 .build()
 
         when:
-        connection.openAsync(futureResultCallback)
+        connection.openAsync(OPERATION_CONTEXT, futureResultCallback)
         futureResultCallback.get()
 
         then:
@@ -170,13 +172,13 @@ class InternalStreamConnectionSpecification extends Specification {
     def 'should close the stream when initialization throws an exception'() {
         given:
         def failedInitializer = Mock(InternalConnectionInitializer) {
-            startHandshake(_) >> { throw new MongoInternalException('Something went wrong') }
+            startHandshake(_, _) >> { throw new MongoInternalException('Something went wrong') }
         }
         def connection = new InternalStreamConnection(SINGLE, SERVER_ID, new TestConnectionGenerationSupplier(), streamFactory, [], null,
                 failedInitializer)
 
         when:
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
 
         then:
         thrown MongoInternalException
@@ -187,14 +189,14 @@ class InternalStreamConnectionSpecification extends Specification {
     def 'should close the stream when initialization throws an exception asynchronously'() {
         given:
         def failedInitializer = Mock(InternalConnectionInitializer) {
-            startHandshakeAsync(_, _) >> { it[1].onResult(null, new MongoInternalException('Something went wrong')) }
+            startHandshakeAsync(_, _, _) >> { it[2].onResult(null, new MongoInternalException('Something went wrong')) }
         }
         def connection = new InternalStreamConnection(SINGLE, SERVER_ID, new TestConnectionGenerationSupplier(), streamFactory, [], null,
                 failedInitializer)
 
         when:
         def futureResultCallback = new FutureResultCallback<Void>()
-        connection.openAsync(futureResultCallback)
+        connection.openAsync(OPERATION_CONTEXT, futureResultCallback)
         futureResultCallback.get()
 
         then:
@@ -204,21 +206,21 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should close the stream when writing a message throws an exception'() {
         given:
-        stream.write(_) >> { throw new IOException('Something went wrong') }
+        stream.write(_, _) >> { throw new IOException('Something went wrong') }
 
         def connection = getOpenedConnection()
         def (buffers1, messageId1) = helper.hello()
         def (buffers2, messageId2) = helper.hello()
 
         when:
-        connection.sendMessage(buffers1, messageId1)
+        connection.sendMessage(buffers1, messageId1, OPERATION_CONTEXT)
 
         then:
         connection.isClosed()
         thrown MongoSocketWriteException
 
         when:
-        connection.sendMessage(buffers2, messageId2)
+        connection.sendMessage(buffers2, messageId2, OPERATION_CONTEXT)
 
         then:
         thrown MongoSocketClosedException
@@ -231,7 +233,7 @@ class InternalStreamConnectionSpecification extends Specification {
         def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync()
         int seen = 0
 
-        stream.writeAsync(_, _) >> { List<ByteBuf> buffers, AsyncCompletionHandler<Void> callback ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, callback ->
             if (seen == 0) {
                 seen += 1
                 return callback.failed(new IOException('Something went wrong'))
@@ -242,7 +244,7 @@ class InternalStreamConnectionSpecification extends Specification {
         def connection = getOpenedConnection()
 
         when:
-        connection.sendMessageAsync(buffers1, messageId1, sndCallbck1)
+        connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1)
         sndCallbck1.get(10, SECONDS)
 
         then:
@@ -250,7 +252,7 @@ class InternalStreamConnectionSpecification extends Specification {
         connection.isClosed()
 
         when:
-        connection.sendMessageAsync(buffers2, messageId2, sndCallbck2)
+        connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2)
         sndCallbck2.get(10, SECONDS)
 
         then:
@@ -259,23 +261,23 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should close the stream when reading the message header throws an exception'() {
         given:
-        stream.read(16, 0) >> { throw new IOException('Something went wrong') }
+        stream.read(16, _) >> { throw new IOException('Something went wrong') }
 
         def connection = getOpenedConnection()
         def (buffers1, messageId1) = helper.hello()
         def (buffers2, messageId2) = helper.hello()
 
         when:
-        connection.sendMessage(buffers1, messageId1)
-        connection.sendMessage(buffers2, messageId2)
-        connection.receiveMessage(messageId1)
+        connection.sendMessage(buffers1, messageId1, OPERATION_CONTEXT)
+        connection.sendMessage(buffers2, messageId2, OPERATION_CONTEXT)
+        connection.receiveMessage(messageId1, OPERATION_CONTEXT)
 
         then:
         connection.isClosed()
         thrown MongoSocketReadException
 
         when:
-        connection.receiveMessage(messageId2)
+        connection.receiveMessage(messageId2, OPERATION_CONTEXT)
 
         then:
         thrown MongoSocketClosedException
@@ -283,12 +285,12 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should throw MongoInternalException when reply header message length > max message length'() {
         given:
-        stream.read(36, 0) >> { helper.headerWithMessageSizeGreaterThanMax(1) }
+        stream.read(36, _) >> { helper.headerWithMessageSizeGreaterThanMax(1) }
 
         def connection = getOpenedConnection()
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         thrown(MongoInternalException)
@@ -297,7 +299,7 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should throw MongoInternalException when reply header message length > max message length asynchronously'() {
         given:
-        stream.readAsync(16, _) >> { int numBytes, AsyncCompletionHandler<ByteBuf> handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.headerWithMessageSizeGreaterThanMax(1, connectionDescription.maxMessageSize))
         }
 
@@ -305,7 +307,7 @@ class InternalStreamConnectionSpecification extends Specification {
         def callback = new FutureResultCallback()
 
         when:
-        connection.receiveMessageAsync(1, callback)
+        connection.receiveMessageAsync(1, OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -315,12 +317,12 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.write throws InterruptedIOException'() {
         given:
-        stream.write(_) >> { throw new InterruptedIOException() }
+        stream.write(_, _) >> { throw new InterruptedIOException() }
         def connection = getOpenedConnection()
         Thread.currentThread().interrupt()
 
         when:
-        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1)
+        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT)
 
         then:
         Thread.interrupted()
@@ -330,11 +332,11 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should throw MongoInterruptedException and leave the interrupt status unset when Stream.write throws InterruptedIOException'() {
         given:
-        stream.write(_) >> { throw new InterruptedIOException() }
+        stream.write(_, _) >> { throw new InterruptedIOException() }
         def connection = getOpenedConnection()
 
         when:
-        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1)
+        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT)
 
         then:
         !Thread.interrupted()
@@ -344,12 +346,12 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.write throws ClosedByInterruptException'() {
         given:
-        stream.write(_) >> { throw new ClosedByInterruptException() }
+        stream.write(_, _) >> { throw new ClosedByInterruptException() }
         def connection = getOpenedConnection()
         Thread.currentThread().interrupt()
 
         when:
-        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1)
+        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT)
 
         then:
         Thread.interrupted()
@@ -359,12 +361,12 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should throw MongoInterruptedException when Stream.write throws SocketException and the thread is interrupted'() {
         given:
-        stream.write(_) >> { throw new SocketException() }
+        stream.write(_, _) >> { throw new SocketException() }
         def connection = getOpenedConnection()
         Thread.currentThread().interrupt()
 
         when:
-        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1)
+        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT)
 
         then:
         Thread.interrupted()
@@ -374,11 +376,11 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should throw MongoSocketWriteException when Stream.write throws SocketException and the thread is not interrupted'() {
         given:
-        stream.write(_) >> { throw new SocketException() }
+        stream.write(_, _) >> { throw new SocketException() }
         def connection = getOpenedConnection()
 
         when:
-        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1)
+        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT)
 
         then:
         thrown(MongoSocketWriteException)
@@ -392,7 +394,7 @@ class InternalStreamConnectionSpecification extends Specification {
         Thread.currentThread().interrupt()
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         Thread.interrupted()
@@ -406,7 +408,7 @@ class InternalStreamConnectionSpecification extends Specification {
         def connection = getOpenedConnection()
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         !Thread.interrupted()
@@ -421,7 +423,7 @@ class InternalStreamConnectionSpecification extends Specification {
         Thread.currentThread().interrupt()
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         Thread.interrupted()
@@ -436,7 +438,7 @@ class InternalStreamConnectionSpecification extends Specification {
         Thread.currentThread().interrupt()
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         Thread.interrupted()
@@ -450,13 +452,95 @@ class InternalStreamConnectionSpecification extends Specification {
         def connection = getOpenedConnection()
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         thrown(MongoSocketReadException)
         connection.isClosed()
     }
 
+    def 'Should throw timeout exception with underlying socket exception as a cause when Stream.read throws SocketTimeoutException'() {
+        given:
+        stream.read(_, _) >> { throw new SocketTimeoutException() }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)))
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() instanceof SocketTimeoutException
+
+        connection.isClosed()
+    }
+
+    def 'Should wrap MongoSocketReadTimeoutException with MongoOperationTimeoutException'() {
+        given:
+        stream.read(_, _) >> { throw new MongoSocketReadTimeoutException("test", new ServerAddress(), null) }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)))
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() == null
+
+        connection.isClosed()
+    }
+
+
+    def 'Should wrap SocketTimeoutException with timeout exception when Stream.read throws SocketTimeoutException async'() {
+        given:
+        stream.readAsync(_ , _, _) >> { numBytes, operationContext, handler ->
+            handler.failed(new SocketTimeoutException())
+        }
+        def connection = getOpenedConnection()
+        def callback = new FutureResultCallback()
+        def operationContext = OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))
+        when:
+        connection.receiveMessageAsync(1, operationContext, callback)
+        callback.get()
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() instanceof SocketTimeoutException
+
+        connection.isClosed()
+    }
+
+    def 'Should wrap MongoSocketReadTimeoutException with MongoOperationTimeoutException async'() {
+        given:
+        stream.readAsync(_, _, _) >> { numBytes, operationContext, handler ->
+            handler.failed(new MongoSocketReadTimeoutException("test", new ServerAddress(), null))
+        }
+
+        def connection = getOpenedConnection()
+        def callback = new FutureResultCallback()
+        def operationContext = OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))
+        when:
+        connection.receiveMessageAsync(1, operationContext, callback)
+        callback.get()
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() == null
+
+        connection.isClosed()
+    }
+
     def 'should close the stream when reading the message header throws an exception asynchronously'() {
         given:
         int seen = 0
@@ -464,26 +548,26 @@ class InternalStreamConnectionSpecification extends Specification {
         def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync()
         def headers = helper.generateHeaders([messageId1, messageId2])
 
-        stream.writeAsync(_, _) >> { List<ByteBuf> buffers, AsyncCompletionHandler<Void> callback ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, callback ->
             callback.completed(null)
         }
-        stream.readAsync(16, _) >> { int numBytes, AsyncCompletionHandler<ByteBuf> handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             if (seen == 0) {
                 seen += 1
                 return handler.failed(new IOException('Something went wrong'))
             }
             handler.completed(headers.pop())
         }
-        stream.readAsync(94, _) >> { int numBytes, AsyncCompletionHandler<ByteBuf> handler ->
+        stream.readAsync(94, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultBody())
         }
         def connection = getOpenedConnection()
 
         when:
-        connection.sendMessageAsync(buffers1, messageId1, sndCallbck1)
-        connection.sendMessageAsync(buffers2, messageId2, sndCallbck2)
-        connection.receiveMessageAsync(messageId1, rcvdCallbck1)
-        connection.receiveMessageAsync(messageId2, rcvdCallbck2)
+        connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1)
+        connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2)
+        connection.receiveMessageAsync(messageId1, OPERATION_CONTEXT, rcvdCallbck1)
+        connection.receiveMessageAsync(messageId2, OPERATION_CONTEXT, rcvdCallbck2)
         rcvdCallbck1.get(1, SECONDS)
 
         then:
@@ -499,20 +583,20 @@ class InternalStreamConnectionSpecification extends Specification {
 
     def 'should close the stream when reading the message body throws an exception'() {
         given:
-        stream.read(16, 0) >> helper.defaultMessageHeader(1)
-        stream.read(90, 0) >> { throw new IOException('Something went wrong') }
+        stream.read(16, _) >> helper.defaultMessageHeader(1)
+        stream.read(90, _) >> { throw new IOException('Something went wrong') }
 
         def connection = getOpenedConnection()
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         connection.isClosed()
         thrown MongoSocketReadException
 
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         thrown MongoSocketClosedException
@@ -525,21 +609,21 @@ class InternalStreamConnectionSpecification extends Specification {
         def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync()
         def headers = helper.generateHeaders([messageId1, messageId2])
 
-        stream.writeAsync(_, _) >> { List<ByteBuf> buffers, AsyncCompletionHandler<Void> callback ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, callback ->
             callback.completed(null)
         }
-        stream.readAsync(16, _) >> { int numBytes, AsyncCompletionHandler<ByteBuf> handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(headers.remove(0))
         }
-        stream.readAsync(_, _) >> { int numBytes, AsyncCompletionHandler<ByteBuf> handler ->
+        stream.readAsync(_, _, _) >> { numBytes, operationContext, handler ->
             handler.failed(new IOException('Something went wrong'))
         }
         def connection = getOpenedConnection()
 
         when:
-        connection.sendMessageAsync(buffers1, messageId1, sndCallbck1)
-        connection.sendMessageAsync(buffers2, messageId2, sndCallbck2)
-        connection.receiveMessageAsync(messageId1, rcvdCallbck1)
+        connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1)
+        connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2)
+        connection.receiveMessageAsync(messageId1, OPERATION_CONTEXT, rcvdCallbck1)
         rcvdCallbck1.get(1, SECONDS)
 
         then:
@@ -547,7 +631,7 @@ class InternalStreamConnectionSpecification extends Specification {
         connection.isClosed()
 
         when:
-        connection.receiveMessageAsync(messageId2, rcvdCallbck2)
+        connection.receiveMessageAsync(messageId2, OPERATION_CONTEXT, rcvdCallbck2)
         rcvdCallbck2.get(1, SECONDS)
 
         then:
@@ -562,12 +646,11 @@ class InternalStreamConnectionSpecification extends Specification {
                 null)
         def response = '{ok : 0, errmsg : "failed"}'
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.messageHeader(commandMessage.getId(), response)
-        stream.read(_, 0) >> helper.reply(response)
+        stream.read(16, _) >> helper.messageHeader(commandMessage.getId(), response)
+        stream.read(_, _) >> helper.reply(response)
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         thrown(MongoCommandException)
@@ -584,19 +667,18 @@ class InternalStreamConnectionSpecification extends Specification {
         def response = '{ok : 0, errmsg : "failed"}'
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultMessageHeader(commandMessage.getId()))
         }
-        stream.readAsync(_, _) >> { numBytes, handler ->
+        stream.readAsync(_, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.reply(response))
         }
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -612,7 +694,7 @@ class InternalStreamConnectionSpecification extends Specification {
         def messages = (1..numberOfOperations).collect { helper.helloAsync() }
 
         def streamLatch = new CountDownLatch(1)
-        stream.writeAsync(_, _) >> { List<ByteBuf> buffers, AsyncCompletionHandler<Void> callback ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, callback ->
             streamPool.submit {
                 streamLatch.await()
                 callback.failed(new IOException())
@@ -624,7 +706,7 @@ class InternalStreamConnectionSpecification extends Specification {
         def callbacks = []
         (1..numberOfOperations).each { n ->
             def (buffers, messageId, sndCallbck, rcvdCallbck) = messages.pop()
-            connection.sendMessageAsync(buffers, messageId, sndCallbck)
+            connection.sendMessageAsync(buffers, messageId, OPERATION_CONTEXT, sndCallbck)
             callbacks.add(sndCallbck)
         }
         streamLatch.countDown()
@@ -645,12 +727,11 @@ class InternalStreamConnectionSpecification extends Specification {
         def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE,
                 null)
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId())
-        stream.read(90, 0) >> helper.defaultReply()
+        stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId())
+        stream.read(90, _) >> helper.defaultReply()
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         commandListener.eventsWereDelivered([
@@ -667,13 +748,13 @@ class InternalStreamConnectionSpecification extends Specification {
         def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE,
                 null)
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId())
-        stream.read(90, 0) >> helper.defaultReply()
+        stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId())
+        stream.read(90, _) >> helper.defaultReply()
 
         when:
         connection.sendAndReceive(commandMessage, {
             BsonReader reader, DecoderContext decoderContext -> throw new CodecConfigurationException('')
-        }, NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext())
+        }, OPERATION_CONTEXT)
 
         then:
         thrown(CodecConfigurationException)
@@ -696,17 +777,17 @@ class InternalStreamConnectionSpecification extends Specification {
                             $clusterTime :  { clusterTime : { $timestamp : { "t" : 42, "i" : 21 } } }
                           }'''
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId())
-        stream.read(_, 0) >> helper.reply(response)
+        stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId())
+        stream.read(_, _) >> helper.reply(response)
         def sessionContext = Mock(SessionContext) {
             1 * advanceOperationTime(BsonDocument.parse(response).getTimestamp('operationTime'))
             1 * advanceClusterTime(BsonDocument.parse(response).getDocument('$clusterTime'))
             getReadConcern() >> ReadConcern.DEFAULT
         }
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), sessionContext, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), operationContext)
 
         then:
         true
@@ -725,13 +806,13 @@ class InternalStreamConnectionSpecification extends Specification {
                             $clusterTime :  { clusterTime : { $timestamp : { "t" : 42, "i" : 21 } } }
                           }'''
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultMessageHeader(commandMessage.getId()))
         }
-        stream.readAsync(_, _) >> { numBytes, handler ->
+        stream.readAsync(_, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.reply(response))
         }
         def sessionContext = Mock(SessionContext) {
@@ -739,10 +820,10 @@ class InternalStreamConnectionSpecification extends Specification {
             1 * advanceClusterTime(BsonDocument.parse(response).getDocument('$clusterTime'))
             getReadConcern() >> ReadConcern.DEFAULT
         }
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), sessionContext, IgnorableRequestContext.INSTANCE,
-                new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), operationContext, callback)
         callback.get()
 
         then:
@@ -756,11 +837,10 @@ class InternalStreamConnectionSpecification extends Specification {
         def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE,
                 null)
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.write(_) >> { throw new MongoSocketWriteException('Failed to write', serverAddress, new IOException()) }
+        stream.write(_, _) >> { throw new MongoSocketWriteException('Failed to write', serverAddress, new IOException()) }
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         def e = thrown(MongoSocketWriteException)
@@ -777,11 +857,10 @@ class InternalStreamConnectionSpecification extends Specification {
         def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE,
                 null)
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> { throw new MongoSocketReadException('Failed to read', serverAddress) }
+        stream.read(16, _) >> { throw new MongoSocketReadException('Failed to read', serverAddress) }
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         def e = thrown(MongoSocketReadException)
@@ -798,12 +877,11 @@ class InternalStreamConnectionSpecification extends Specification {
         def commandMessage = new CommandMessage(cmdNamespace, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE,
                 null)
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId())
-        stream.read(90, 0) >> { throw new MongoSocketReadException('Failed to read', serverAddress) }
+        stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId())
+        stream.read(90, _) >> { throw new MongoSocketReadException('Failed to read', serverAddress) }
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         def e = thrown(MongoSocketException)
@@ -821,12 +899,11 @@ class InternalStreamConnectionSpecification extends Specification {
                 null)
         def response = '{ok : 0, errmsg : "failed"}'
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.messageHeader(commandMessage.getId(), response)
-        stream.read(_, 0) >> helper.reply(response)
+        stream.read(16, _) >> helper.messageHeader(commandMessage.getId(), response)
+        stream.read(_, _) >> helper.reply(response)
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         def e = thrown(MongoCommandException)
@@ -843,12 +920,11 @@ class InternalStreamConnectionSpecification extends Specification {
         def commandMessage = new CommandMessage(cmdNamespace, securitySensitiveCommand, fieldNameValidator, primary(), messageSettings,
                 MULTIPLE, null)
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId())
-        stream.read(90, 0) >> helper.defaultReply()
+        stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId())
+        stream.read(90, _) >> helper.defaultReply()
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         commandListener.eventsWereDelivered([
@@ -880,12 +956,11 @@ class InternalStreamConnectionSpecification extends Specification {
         def commandMessage = new CommandMessage(cmdNamespace, securitySensitiveCommand, fieldNameValidator, primary(), messageSettings,
                 MULTIPLE, null)
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.read(16, 0) >> helper.defaultMessageHeader(commandMessage.getId())
-        stream.read(_, 0) >> helper.reply('{ok : 0, errmsg : "failed"}')
+        stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId())
+        stream.read(_, _) >> helper.reply('{ok : 0, errmsg : "failed"}')
 
         when:
-        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE,
-                new OperationContext())
+        connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         thrown(MongoCommandException)
@@ -920,19 +995,18 @@ class InternalStreamConnectionSpecification extends Specification {
         def callback = new FutureResultCallback()
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultMessageHeader(commandMessage.getId()))
         }
-        stream.readAsync(90, _) >> { numBytes, handler ->
+        stream.readAsync(90, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultReply())
         }
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -952,20 +1026,20 @@ class InternalStreamConnectionSpecification extends Specification {
         def callback = new FutureResultCallback()
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultMessageHeader(commandMessage.getId()))
         }
-        stream.readAsync(90, _) >> { numBytes, handler ->
+        stream.readAsync(90, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultReply())
         }
 
         when:
         connection.sendAndReceiveAsync(commandMessage, {
             BsonReader reader, DecoderContext decoderContext -> throw new CodecConfigurationException('')
-        }, NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        }, OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -987,13 +1061,12 @@ class InternalStreamConnectionSpecification extends Specification {
         def callback = new FutureResultCallback()
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.failed(new MongoSocketWriteException('failed', serverAddress, new IOException()))
         }
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -1013,16 +1086,15 @@ class InternalStreamConnectionSpecification extends Specification {
         def callback = new FutureResultCallback()
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.failed(new MongoSocketReadException('Failed to read', serverAddress))
         }
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -1042,19 +1114,18 @@ class InternalStreamConnectionSpecification extends Specification {
         def callback = new FutureResultCallback()
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultMessageHeader(commandMessage.getId()))
         }
-        stream.readAsync(90, _) >> { numBytes, handler ->
+        stream.readAsync(90, _, _) >> { numBytes, operationContext, handler ->
             handler.failed(new MongoSocketReadException('Failed to read', serverAddress))
         }
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -1075,19 +1146,18 @@ class InternalStreamConnectionSpecification extends Specification {
         def response = '{ok : 0, errmsg : "failed"}'
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultMessageHeader(commandMessage.getId()))
         }
-        stream.readAsync(_, _) >> { numBytes, handler ->
+        stream.readAsync(_, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.reply(response))
         }
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
@@ -1107,19 +1177,18 @@ class InternalStreamConnectionSpecification extends Specification {
         def callback = new FutureResultCallback()
 
         stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) }
-        stream.writeAsync(_, _) >> { buffers, handler ->
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, handler ->
             handler.completed(null)
         }
-        stream.readAsync(16, _) >> { numBytes, handler ->
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultMessageHeader(commandMessage.getId()))
         }
-        stream.readAsync(90, _) >> { numBytes, handler ->
+        stream.readAsync(90, _, _) >> { numBytes, operationContext, handler ->
             handler.completed(helper.defaultReply())
         }
 
         when:
-        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), NoOpSessionContext.INSTANCE,
-                IgnorableRequestContext.INSTANCE, new OperationContext(), callback)
+        connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback)
         callback.get()
 
         then:
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy
index 4ea47fb3694..374687f7d01 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy
@@ -29,6 +29,9 @@ import spock.lang.Unroll
 import javax.management.ObjectName
 import java.lang.management.ManagementFactory
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY
+
 class JMXConnectionPoolListenerSpecification extends Specification {
     private static final ServerId SERVER_ID = new ServerId(new ClusterId(), new ServerAddress('host1', 27018))
 
@@ -43,12 +46,12 @@ class JMXConnectionPoolListenerSpecification extends Specification {
         given:
         provider = new DefaultConnectionPool(SERVER_ID, connectionFactory,
                 ConnectionPoolSettings.builder().minSize(0).maxSize(5)
-                        .addConnectionPoolListener(jmxListener).build(), mockSdamProvider())
+                        .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
         provider.ready()
 
         when:
-        provider.get(new OperationContext())
-        provider.get(new OperationContext()).close()
+        provider.get(OPERATION_CONTEXT)
+        provider.get(OPERATION_CONTEXT).close()
 
         then:
         with(jmxListener.getMBean(SERVER_ID)) {
@@ -68,7 +71,7 @@ class JMXConnectionPoolListenerSpecification extends Specification {
         when:
         provider = new DefaultConnectionPool(SERVER_ID, connectionFactory,
                 ConnectionPoolSettings.builder().minSize(0).maxSize(5)
-                        .addConnectionPoolListener(jmxListener).build(), mockSdamProvider())
+                        .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         then:
         ManagementFactory.getPlatformMBeanServer().isRegistered(
@@ -82,7 +85,7 @@ class JMXConnectionPoolListenerSpecification extends Specification {
         given:
         provider = new DefaultConnectionPool(SERVER_ID, connectionFactory,
                 ConnectionPoolSettings.builder().minSize(0).maxSize(5)
-                        .addConnectionPoolListener(jmxListener).build(), mockSdamProvider())
+                        .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY)
 
         when:
         provider.close()
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java
index 2d3e6dbb49d..ad447f3da65 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java
@@ -19,6 +19,7 @@
 import com.mongodb.MongoClientException;
 import com.mongodb.MongoConfigurationException;
 import com.mongodb.MongoException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoTimeoutException;
 import com.mongodb.ServerAddress;
 import com.mongodb.async.FutureResultCallback;
@@ -29,8 +30,8 @@
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.connection.ServerSettings;
 import com.mongodb.connection.ServerType;
-import com.mongodb.selector.ServerSelector;
 import com.mongodb.lang.NonNull;
+import com.mongodb.selector.ServerSelector;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.RepeatedTest;
 import org.junit.jupiter.api.Tag;
@@ -50,6 +51,9 @@
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static com.mongodb.ClusterFixture.createOperationContext;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -90,14 +94,14 @@ public void shouldSelectServerWhenThereIsNoSRVLookup() {
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, mock(DnsSrvRecordMonitorFactory.class));
 
         // when
-        ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), new OperationContext());
+        ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), OPERATION_CONTEXT);
 
         // then
         assertServerTupleExpectations(serverAddress, expectedServer, serverTuple);
 
         // when
         FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>();
-        cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback);
+        cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback);
         serverTuple = callback.get();
 
         // then
@@ -125,7 +129,7 @@ public void shouldSelectServerWhenThereIsSRVLookup() {
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
 
         // when
-        ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), new OperationContext());
+        ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), OPERATION_CONTEXT);
 
         // then
         assertServerTupleExpectations(resolvedServerAddress, expectedServer, serverTuple);
@@ -153,7 +157,7 @@ public void shouldSelectServerAsynchronouslyWhenThereIsSRVLookup() {
 
         // when
         FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>();
-        cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback);
+        cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback);
         ServerTuple serverTuple = callback.get();
 
         // then
@@ -179,7 +183,7 @@ public void shouldFailSelectServerWhenThereIsSRVMisconfiguration() {
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
 
         MongoClientException exception = assertThrows(MongoClientException.class, () -> cluster.selectServer(mock(ServerSelector.class),
-                new OperationContext()));
+                OPERATION_CONTEXT));
         assertEquals("In load balancing mode, the host must resolve to a single SRV record, but instead it resolved to multiple hosts",
                 exception.getMessage());
     }
@@ -203,7 +207,7 @@ public void shouldFailSelectServerAsynchronouslyWhenThereIsSRVMisconfiguration()
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
 
         FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>();
-        cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback);
+        cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback);
 
         MongoClientException exception = assertThrows(MongoClientException.class, callback::get);
         assertEquals("In load balancing mode, the host must resolve to a single SRV record, but instead it resolved to multiple hosts",
@@ -218,7 +222,6 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookup() {
         ClusterableServer expectedServer = mock(ClusterableServer.class);
 
         ClusterSettings clusterSettings = ClusterSettings.builder()
-                .serverSelectionTimeout(5, MILLISECONDS)
                 .mode(ClusterConnectionMode.LOAD_BALANCED)
                 .srvHost(srvHostName)
                 .build();
@@ -232,8 +235,34 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookup() {
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
 
         MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class),
-                new OperationContext()));
-        assertEquals("Timed out after 5 ms while waiting to resolve SRV records for foo.bar.com.", exception.getMessage());
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5))));
+        assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com"));
+    }
+
+    @Test
+    public void shouldTimeoutSelectServerWhenThereIsSRVLookupAndTimeoutMsIsSet() {
+        // given
+        String srvHostName = "foo.bar.com";
+        ServerAddress resolvedServerAddress = new ServerAddress("host1");
+        ClusterableServer expectedServer = mock(ClusterableServer.class);
+
+        ClusterSettings clusterSettings = ClusterSettings.builder()
+                .mode(ClusterConnectionMode.LOAD_BALANCED)
+                .srvHost(srvHostName)
+                .build();
+
+        ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer);
+
+        DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class);
+        when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer(
+                invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)).sleepTime(Duration.ofHours(1)));
+
+        cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
+
+        //when & then
+        MongoOperationTimeoutException exception = assertThrows(MongoOperationTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5).withTimeout(10L, MILLISECONDS))));
+        assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com"));
     }
 
     @Test
@@ -244,7 +273,6 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookupException() {
         ClusterableServer expectedServer = mock(ClusterableServer.class);
 
         ClusterSettings clusterSettings = ClusterSettings.builder()
-                .serverSelectionTimeout(10, MILLISECONDS)
                 .mode(ClusterConnectionMode.LOAD_BALANCED)
                 .srvHost(srvHostName)
                 .build();
@@ -259,10 +287,10 @@ public void shouldTimeoutSelectServerWhenThereIsSRVLookupException() {
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
 
         MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class),
-                new OperationContext()));
-        assertEquals("Timed out after 10 ms while waiting to resolve SRV records for foo.bar.com. "
-                        + "Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'",
-                exception.getMessage());
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(10))));
+
+        assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com"));
+        assertTrue(exception.getMessage().contains("Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'"));
     }
 
     @Test
@@ -274,7 +302,6 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookup() {
 
         ClusterSettings clusterSettings = ClusterSettings
                 .builder()
-                .serverSelectionTimeout(5, MILLISECONDS)
                 .mode(ClusterConnectionMode.LOAD_BALANCED)
                 .srvHost(srvHostName)
                 .build();
@@ -288,10 +315,11 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookup() {
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
 
         FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>();
-        cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback);
+        cluster.selectServerAsync(mock(ServerSelector.class),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback);
 
         MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, callback::get);
-        assertEquals("Timed out after 5 ms while waiting to resolve SRV records for foo.bar.com.", exception.getMessage());
+        assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com"));
     }
 
     @Test
@@ -302,7 +330,6 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookupException
         ClusterableServer expectedServer = mock(ClusterableServer.class);
 
         ClusterSettings clusterSettings = ClusterSettings.builder()
-                .serverSelectionTimeout(10, MILLISECONDS)
                 .mode(ClusterConnectionMode.LOAD_BALANCED)
                 .srvHost(srvHostName)
                 .build();
@@ -317,12 +344,12 @@ public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookupException
         cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, dnsSrvRecordMonitorFactory);
 
         FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>();
-        cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback);
+        cluster.selectServerAsync(mock(ServerSelector.class),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(10)), callback);
 
         MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, callback::get);
-        assertEquals("Timed out after 10 ms while waiting to resolve SRV records for foo.bar.com. "
-                        + "Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'",
-                exception.getMessage());
+        assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com"));
+        assertTrue(exception.getMessage().contains("Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'"));
     }
 
     @Test
@@ -368,7 +395,6 @@ public void synchronousConcurrentTest() throws InterruptedException, ExecutionEx
         ClusterableServer expectedServer = mock(ClusterableServer.class);
 
         ClusterSettings clusterSettings = ClusterSettings.builder()
-                .serverSelectionTimeout(5, MILLISECONDS)
                 .mode(ClusterConnectionMode.LOAD_BALANCED)
                 .srvHost(srvHostName)
                 .build();
@@ -389,7 +415,8 @@ public void synchronousConcurrentTest() throws InterruptedException, ExecutionEx
                 boolean success = false;
                 while (!success) {
                     try {
-                        cluster.selectServer(mock(ServerSelector.class),  new OperationContext());
+                        cluster.selectServer(mock(ServerSelector.class),
+                                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)));
                         success = true;
                     } catch (MongoTimeoutException e) {
                         // this is expected
@@ -397,7 +424,8 @@ public void synchronousConcurrentTest() throws InterruptedException, ExecutionEx
                 }
                 // Keep going for a little while
                 for (int j = 0; j < 100; j++) {
-                    cluster.selectServer(mock(ServerSelector.class),  new OperationContext());
+                    cluster.selectServer(mock(ServerSelector.class),
+                            createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)));
                 }
             }));
         }
@@ -417,7 +445,6 @@ public void asynchronousConcurrentTest() throws InterruptedException, ExecutionE
         ClusterableServer expectedServer = mock(ClusterableServer.class);
 
         ClusterSettings clusterSettings = ClusterSettings.builder()
-                .serverSelectionTimeout(5, MILLISECONDS)
                 .mode(ClusterConnectionMode.LOAD_BALANCED)
                 .srvHost(srvHostName)
                 .build();
@@ -447,13 +474,15 @@ public void asynchronousConcurrentTest() throws InterruptedException, ExecutionE
                 while (!dnsSrvRecordMonitorReference.get().isInitialized()) {
                     FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>();
                     callbacks.add(callback);
-                    cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback);
+                    cluster.selectServerAsync(mock(ServerSelector.class),
+                            createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback);
                 }
                 // Keep going for a little while
                 for (int j = 0; j < 100; j++) {
                     FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>();
                     callbacks.add(callback);
-                    cluster.selectServerAsync(mock(ServerSelector.class), new OperationContext(), callback);
+                    cluster.selectServerAsync(mock(ServerSelector.class),
+                            createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback);
                 }
             }));
         }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy
index 9c3fb0d91db..b317f3dd0ba 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy
@@ -30,6 +30,7 @@ import com.mongodb.event.CommandListener
 import com.mongodb.event.CommandStartedEvent
 import com.mongodb.event.CommandSucceededEvent
 import com.mongodb.internal.IgnorableRequestContext
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.diagnostics.logging.Logger
 import com.mongodb.internal.logging.StructuredLogger
 import com.mongodb.internal.validator.NoOpFieldNameValidator
@@ -39,6 +40,7 @@ import org.bson.BsonInt32
 import org.bson.BsonString
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
 import static com.mongodb.connection.ClusterConnectionMode.SINGLE
 import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION
@@ -57,14 +59,14 @@ class LoggingCommandEventSenderSpecification extends Specification {
         def message = new CommandMessage(namespace, commandDocument,
                 new NoOpFieldNameValidator(), ReadPreference.primary(), messageSettings, MULTIPLE, null)
         def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())
-        message.encode(bsonOutput, NoOpSessionContext.INSTANCE)
+        message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                Stub(TimeoutContext), null))
         def logger = Stub(Logger) {
             isDebugEnabled() >> debugLoggingEnabled
         }
-        def context = new OperationContext()
+        def operationContext = OPERATION_CONTEXT
         def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, commandListener,
-                IgnorableRequestContext.INSTANCE, context, message, bsonOutput, new StructuredLogger(logger),
-                LoggerSettings.builder().build())
+                operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build())
 
         when:
         sender.sendStartedEvent()
@@ -73,17 +75,17 @@ class LoggingCommandEventSenderSpecification extends Specification {
         sender.sendFailedEvent(failureException)
 
         then:
-        commandListener.eventsWereDelivered(
-                [
-                        new CommandStartedEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName,
-                                commandDocument.getFirstKey(), commandDocument.append('$db', new BsonString(namespace.databaseName))),
-                        new CommandSucceededEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName,
-                                commandDocument.getFirstKey(), new BsonDocument(), 1),
-                        new CommandSucceededEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName,
-                                commandDocument.getFirstKey(), replyDocument, 1),
-                        new CommandFailedEvent(null, context.id, message.getId(), connectionDescription, namespace.databaseName,
-                                commandDocument.getFirstKey(), 1, failureException)
-                ])
+        commandListener.eventsWereDelivered([
+                new CommandStartedEvent(null, operationContext.id, message.getId(), connectionDescription,
+                        namespace.databaseName, commandDocument.getFirstKey(),
+                        commandDocument.append('$db', new BsonString(namespace.databaseName))),
+                new CommandSucceededEvent(null, operationContext.id, message.getId(), connectionDescription,
+                        namespace.databaseName, commandDocument.getFirstKey(), new BsonDocument(), 1),
+                new CommandSucceededEvent(null, operationContext.id, message.getId(), connectionDescription,
+                        namespace.databaseName, commandDocument.getFirstKey(), replyDocument, 1),
+                new CommandFailedEvent(null, operationContext.id, message.getId(), connectionDescription,
+                        namespace.databaseName, commandDocument.getFirstKey(), 1, failureException)
+        ])
 
         where:
         debugLoggingEnabled << [true, false]
@@ -102,13 +104,14 @@ class LoggingCommandEventSenderSpecification extends Specification {
         def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(),
                 messageSettings, MULTIPLE, null)
         def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())
-        message.encode(bsonOutput, NoOpSessionContext.INSTANCE)
+        message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                Stub(TimeoutContext), null))
         def logger = Mock(Logger) {
             isDebugEnabled() >> true
         }
-        def operationContext = new OperationContext()
+        def operationContext = OPERATION_CONTEXT
         def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, commandListener,
-                IgnorableRequestContext.INSTANCE, operationContext, message, bsonOutput, new StructuredLogger(logger),
+                operationContext, message, bsonOutput, new StructuredLogger(logger),
                 LoggerSettings.builder().build())
         when:
         sender.sendStartedEvent()
@@ -158,14 +161,15 @@ class LoggingCommandEventSenderSpecification extends Specification {
         def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(),
                 messageSettings, SINGLE, null)
         def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())
-        message.encode(bsonOutput, NoOpSessionContext.INSTANCE)
+        message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                Stub(TimeoutContext), null))
         def logger = Mock(Logger) {
             isDebugEnabled() >> true
         }
-        def operationContext = new OperationContext()
+        def operationContext = OPERATION_CONTEXT
 
-        def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, null, null,
-                operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build())
+        def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, null, operationContext,
+                message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build())
 
         when:
         sender.sendStartedEvent()
@@ -191,14 +195,14 @@ class LoggingCommandEventSenderSpecification extends Specification {
         def message = new CommandMessage(namespace, commandDocument, new NoOpFieldNameValidator(), ReadPreference.primary(),
                 messageSettings, SINGLE, null)
         def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())
-        message.encode(bsonOutput, NoOpSessionContext.INSTANCE)
+        message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                Stub(TimeoutContext), null))
         def logger = Mock(Logger) {
             isDebugEnabled() >> true
         }
-        def operationContext = new OperationContext()
+        def operationContext = OPERATION_CONTEXT
         def sender = new LoggingCommandEventSender(['createUser'] as Set, [] as Set, connectionDescription, null,
-                IgnorableRequestContext.INSTANCE, operationContext, message, bsonOutput, new StructuredLogger(logger),
-                LoggerSettings.builder().build())
+                operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build())
 
         when:
         sender.sendStartedEvent()
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy
index f14305bb6b8..e0f932f4963 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy
@@ -16,7 +16,6 @@
 
 package com.mongodb.internal.connection
 
-
 import com.mongodb.ServerAddress
 import com.mongodb.connection.ClusterDescription
 import com.mongodb.connection.ClusterId
@@ -29,6 +28,7 @@ import com.mongodb.internal.selector.WritableServerSelector
 import org.bson.types.ObjectId
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
 import static com.mongodb.connection.ClusterType.REPLICA_SET
 import static com.mongodb.connection.ClusterType.SHARDED
@@ -94,7 +94,9 @@ class MultiServerClusterSpecification extends Specification {
         cluster.close()
 
         when:
-        cluster.getServersSnapshot()
+        cluster.getServersSnapshot(
+                OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(),
+                OPERATION_CONTEXT.getTimeoutContext())
 
         then:
         thrown(IllegalStateException)
@@ -379,7 +381,7 @@ class MultiServerClusterSpecification extends Specification {
         cluster.close()
 
         when:
-        cluster.selectServer(new WritableServerSelector(), new OperationContext())
+        cluster.selectServer(new WritableServerSelector(), OPERATION_CONTEXT)
 
         then:
         thrown(IllegalStateException)
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java
index e4a4f80289c..12d8e9fa7c3 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java
@@ -30,6 +30,7 @@
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getServerApi;
 import static com.mongodb.internal.connection.MessageHelper.getApiVersionField;
 import static com.mongodb.internal.connection.MessageHelper.getDbField;
@@ -53,7 +54,7 @@ public void before() {
     public void testSuccessfulAuthentication() {
         enqueueSuccessfulReply();
 
-        subject.authenticate(connection, connectionDescription);
+        subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT);
 
         validateMessages();
     }
@@ -63,7 +64,7 @@ public void testSuccessfulAuthenticationAsync() throws ExecutionException, Inter
         enqueueSuccessfulReply();
 
         FutureResultCallback<Void> futureCallback = new FutureResultCallback<>();
-        subject.authenticateAsync(connection, connectionDescription, futureCallback);
+        subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback);
         futureCallback.get();
 
         validateMessages();
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy
index 0bf71212f10..069ece30dbe 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy
@@ -16,13 +16,14 @@
 
 package com.mongodb.internal.connection
 
-
 import com.mongodb.MongoCommandException
 import com.mongodb.MongoExecutionTimeoutException
 import com.mongodb.MongoNodeIsRecoveringException
 import com.mongodb.MongoNotPrimaryException
+import com.mongodb.MongoOperationTimeoutException
 import com.mongodb.MongoQueryException
 import com.mongodb.ServerAddress
+import com.mongodb.internal.TimeoutContext
 import org.bson.BsonBoolean
 import org.bson.BsonDocument
 import org.bson.BsonDouble
@@ -32,6 +33,7 @@ import org.bson.BsonNull
 import org.bson.BsonString
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.*
 import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException
 import static com.mongodb.internal.connection.ProtocolHelper.getQueryFailureException
 import static com.mongodb.internal.connection.ProtocolHelper.isCommandOk
@@ -71,18 +73,37 @@ class ProtocolHelperSpecification extends Specification {
     def 'command failure exception should be MongoExecutionTimeoutException if error code is 50'() {
         expect:
         getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)),
-                                   new ServerAddress()) instanceof MongoExecutionTimeoutException
+                                   new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS))
+                instanceof MongoExecutionTimeoutException
+    }
+
+    def 'command failure exception should be MongoOperationTimeoutException if error code is 50 and timeoutMS is set'() {
+        expect:
+        getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)),
+                new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))
+                instanceof MongoOperationTimeoutException
     }
 
     def 'query failure exception should be MongoExecutionTimeoutException if error code is 50'() {
         expect:
         getQueryFailureException(new BsonDocument('code', new BsonInt32(50)),
-                                 new ServerAddress()) instanceof MongoExecutionTimeoutException
+                                 new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS))
+                instanceof MongoExecutionTimeoutException
+    }
+
+    def 'query failure exception should be MongoOperationTimeoutException if error code is 50 and timeoutMS is set'() {
+        expect:
+        def exception = getQueryFailureException(new BsonDocument('code', new BsonInt32(50)),
+                new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))
+        exception instanceof MongoOperationTimeoutException
+        exception.getCause() instanceof MongoExecutionTimeoutException
+
     }
 
     def 'command failure exceptions should handle MongoNotPrimaryException scenarios'() {
         expect:
-        getCommandFailureException(exception, new ServerAddress()) instanceof MongoNotPrimaryException
+        getCommandFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS))
+                instanceof MongoNotPrimaryException
 
         where:
         exception << [
@@ -94,7 +115,8 @@ class ProtocolHelperSpecification extends Specification {
 
     def 'query failure exceptions should handle MongoNotPrimaryException scenarios'() {
         expect:
-        getQueryFailureException(exception, new ServerAddress()) instanceof MongoNotPrimaryException
+        getQueryFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS))
+                instanceof MongoNotPrimaryException
 
         where:
         exception << [
@@ -106,7 +128,8 @@ class ProtocolHelperSpecification extends Specification {
 
     def 'command failure exceptions should handle MongoNodeIsRecoveringException scenarios'() {
         expect:
-        getCommandFailureException(exception, new ServerAddress()) instanceof MongoNodeIsRecoveringException
+        getCommandFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS))
+                instanceof MongoNodeIsRecoveringException
 
         where:
         exception << [
@@ -121,7 +144,8 @@ class ProtocolHelperSpecification extends Specification {
 
     def 'query failure exceptions should handle MongoNodeIsRecoveringException scenarios'() {
         expect:
-        getQueryFailureException(exception, new ServerAddress()) instanceof MongoNodeIsRecoveringException
+        getQueryFailureException(exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS))
+                instanceof MongoNodeIsRecoveringException
 
         where:
         exception << [
@@ -137,13 +161,13 @@ class ProtocolHelperSpecification extends Specification {
     def 'command failure exception should be MongoCommandException'() {
         expect:
         getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('errmsg', new BsonString('some other problem')),
-                                   new ServerAddress()) instanceof MongoCommandException
+                                   new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoCommandException
     }
 
     def 'query failure exception should be MongoQueryException'() {
         expect:
         getQueryFailureException(new BsonDocument('$err', new BsonString('some other problem')),
-                                 new ServerAddress()) instanceof MongoQueryException
+                                 new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoQueryException
     }
 
 }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java
new file mode 100644
index 00000000000..b44afb7a725
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.connection;
+
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.List;
+import java.util.stream.Stream;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+
+public class RoundTripTimeSamplerTest {
+
+    @ParameterizedTest(name = "{index}: samples: {0}. Expected: average: {1} min: {2}")
+    @DisplayName("RoundTripTimeSampler should calculate the expected average and min round trip times")
+    @MethodSource
+    public void testRoundTripTimeSampler(final List<Integer> samples, final int expectedAverageRTT, final int expectedMinRTT) {
+        RoundTripTimeSampler sampler = new RoundTripTimeSampler();
+        samples.forEach(sampler::addSample);
+
+        assertEquals(expectedMinRTT, sampler.getMin());
+        assertEquals(expectedAverageRTT, sampler.getAverage());
+    }
+
+    private static Stream<Arguments> testRoundTripTimeSampler() {
+        return Stream.of(
+                Arguments.of(emptyList(), 0, 0),
+                Arguments.of(singletonList(10), 10, 0),
+                Arguments.of(asList(10, 20), 12, 10),
+                Arguments.of(asList(10, 20, 8), 11, 8),
+                Arguments.of(asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), 11, 6)
+        );
+    }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy
index 32295d12b7c..21f9bc28161 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy
@@ -23,6 +23,7 @@ import com.mongodb.connection.ClusterId
 import com.mongodb.connection.ConnectionDescription
 import com.mongodb.connection.ServerId
 import com.mongodb.connection.ServerType
+import com.mongodb.internal.TimeoutSettings
 import org.bson.BsonDocument
 import spock.lang.Specification
 
@@ -34,11 +35,13 @@ import static com.mongodb.MongoCredential.createScramSha1Credential
 import static com.mongodb.MongoCredential.createScramSha256Credential
 import static com.mongodb.connection.ClusterConnectionMode.SINGLE
 import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply
+import static com.mongodb.internal.connection.OperationContext.simpleOperationContext
 import static org.junit.Assert.assertEquals
 
 class ScramShaAuthenticatorSpecification extends Specification {
     def serverId = new ServerId(new ClusterId(), new ServerAddress('localhost', 27017))
     def connectionDescription = new ConnectionDescription(serverId)
+    def operationContext = simpleOperationContext(TimeoutSettings.DEFAULT, null)
     private final static MongoCredentialWithCache SHA1_CREDENTIAL =
             new MongoCredentialWithCache(createScramSha1Credential('user', 'database', 'pencil' as char[]))
     private final static MongoCredentialWithCache SHA256_CREDENTIAL =
@@ -522,10 +525,10 @@ class ScramShaAuthenticatorSpecification extends Specification {
     def authenticate(TestInternalConnection connection, ScramShaAuthenticator authenticator, boolean async) {
         if (async) {
             FutureResultCallback<Void> futureCallback = new FutureResultCallback<Void>()
-            authenticator.authenticateAsync(connection, connectionDescription, futureCallback)
+            authenticator.authenticateAsync(connection, connectionDescription, operationContext, futureCallback)
             futureCallback.get(5, TimeUnit.SECONDS)
         } else {
-            authenticator.authenticate(connection, connectionDescription)
+            authenticator.authenticate(connection, connectionDescription, operationContext)
         }
     }
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java
index 816bca3f3f9..f1c8f69eb29 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java
@@ -32,6 +32,8 @@
 
 import java.util.List;
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static com.mongodb.ClusterFixture.createOperationContext;
 import static java.util.Arrays.asList;
 import static java.util.Collections.unmodifiableList;
 import static org.junit.jupiter.api.Assertions.assertAll;
@@ -51,7 +53,7 @@ final class ServerDeprioritizationTest {
 
     @BeforeEach
     void beforeEach() {
-        serverDeprioritization = new OperationContext().getServerDeprioritization();
+        serverDeprioritization = createOperationContext(TIMEOUT_SETTINGS).getServerDeprioritization();
     }
 
     @Test
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java
index 2bc41fee1be..4a2e94c19a5 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java
@@ -19,6 +19,7 @@
 import com.mongodb.ServerAddress;
 import com.mongodb.connection.ClusterType;
 import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.time.Timeout;
 import org.bson.BsonDocument;
 import org.bson.BsonNull;
 import org.bson.BsonValue;
@@ -30,6 +31,7 @@
 import java.net.URISyntaxException;
 import java.util.Collection;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getClusterDescription;
 import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries;
 import static com.mongodb.internal.event.EventListenerHelper.NO_OP_CLUSTER_LISTENER;
@@ -120,7 +122,10 @@ private void assertServer(final String serverName, final BsonDocument expectedSe
 
         if (expectedServerDescriptionDocument.isDocument("pool")) {
             int expectedGeneration = expectedServerDescriptionDocument.getDocument("pool").getNumber("generation").intValue();
-            DefaultServer server = (DefaultServer) getCluster().getServersSnapshot().getServer(new ServerAddress(serverName));
+            Timeout serverSelectionTimeout = OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout();
+            DefaultServer server = (DefaultServer) getCluster()
+                    .getServersSnapshot(serverSelectionTimeout, OPERATION_CONTEXT.getTimeoutContext())
+                    .getServer(new ServerAddress(serverName));
             assertEquals(expectedGeneration, server.getConnectionPool().getGeneration());
         }
     }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java
index 5b68d7f84bb..9a7a8492563 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java
@@ -43,7 +43,7 @@ public ServerSelectionRttTest(final String description, final BsonDocument defin
 
     @Test
     public void shouldPassAllOutcomes() {
-        ExponentiallyWeightedMovingAverage subject = new ExponentiallyWeightedMovingAverage(0.2);
+        RoundTripTimeSampler subject = new RoundTripTimeSampler();
 
         BsonValue current = definition.get("avg_rtt_ms");
         if (current.isNumber()) {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java
index 6f1a9d25bb1..878876d74bd 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java
@@ -40,6 +40,8 @@
 import java.util.Map;
 import java.util.stream.IntStream;
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static com.mongodb.ClusterFixture.createOperationContext;
 import static com.mongodb.connection.ServerSelectionSelectionTest.buildClusterDescription;
 import static java.util.stream.Collectors.groupingBy;
 import static java.util.stream.Collectors.toList;
@@ -74,7 +76,8 @@ public ServerSelectionWithinLatencyWindowTest(
     @Test
     public void shouldPassAllOutcomes() {
         ServerSelector selector = new ReadPreferenceServerSelector(ReadPreference.nearest());
-        OperationContext.ServerDeprioritization emptyServerDeprioritization = new OperationContext().getServerDeprioritization();
+        OperationContext.ServerDeprioritization emptyServerDeprioritization = createOperationContext(TIMEOUT_SETTINGS)
+                .getServerDeprioritization();
         ClusterSettings defaultClusterSettings = ClusterSettings.builder().build();
         Map<ServerAddress, List<ServerTuple>> selectionResultsGroupedByServerAddress = IntStream.range(0, iterations)
                 .mapToObj(i -> BaseCluster.createCompleteSelectorAndSelectServer(selector, clusterDescription, serversSnapshot,
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy
index a3a0f6a2d6f..3ebd5c4eb0f 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy
@@ -28,6 +28,7 @@ import com.mongodb.event.ClusterListener
 import com.mongodb.internal.selector.WritableServerSelector
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.connection.ClusterConnectionMode.SINGLE
 import static com.mongodb.connection.ClusterType.REPLICA_SET
 import static com.mongodb.connection.ClusterType.UNKNOWN
@@ -76,7 +77,10 @@ class SingleServerClusterSpecification extends Specification {
         sendNotification(firstServer, STANDALONE)
 
         then:
-        cluster.getServersSnapshot().getServer(firstServer) == factory.getServer(firstServer)
+        cluster.getServersSnapshot(OPERATION_CONTEXT
+                        .getTimeoutContext()
+                        .computeServerSelectionTimeout(),
+                OPERATION_CONTEXT.getTimeoutContext()).getServer(firstServer) == factory.getServer(firstServer)
 
         cleanup:
         cluster?.close()
@@ -90,7 +94,8 @@ class SingleServerClusterSpecification extends Specification {
         cluster.close()
 
         when:
-        cluster.getServersSnapshot()
+        cluster.getServersSnapshot(OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(),
+                OPERATION_CONTEXT.getTimeoutContext())
 
         then:
         thrown(IllegalStateException)
@@ -140,7 +145,7 @@ class SingleServerClusterSpecification extends Specification {
         sendNotification(firstServer, getBuilder(firstServer).minWireVersion(1000).maxWireVersion(1000).build())
 
         when:
-        cluster.selectServer(new WritableServerSelector(), new OperationContext())
+        cluster.selectServer(new WritableServerSelector(), OPERATION_CONTEXT)
 
         then:
         thrown(MongoIncompatibleDriverException)
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy
index 7745d9580ff..855951d425a 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy
@@ -16,9 +16,12 @@
 
 package com.mongodb.internal.connection
 
+import com.mongodb.ClusterFixture
 import com.mongodb.MongoNamespace
 import com.mongodb.ReadPreference
 import com.mongodb.async.FutureResultCallback
+import com.mongodb.internal.IgnorableRequestContext
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.validator.NoOpFieldNameValidator
 import org.bson.BsonBinaryWriter
 import org.bson.BsonDocument
@@ -168,7 +171,10 @@ class StreamHelper {
                 new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new NoOpFieldNameValidator(), ReadPreference.primary(),
                 MessageSettings.builder().build(), SINGLE, null)
         OutputBuffer outputBuffer = new BasicOutputBuffer()
-        command.encode(outputBuffer, NoOpSessionContext.INSTANCE)
+        command.encode(outputBuffer, new OperationContext(
+                IgnorableRequestContext.INSTANCE,
+                NoOpSessionContext.INSTANCE,
+                new TimeoutContext(ClusterFixture.TIMEOUT_SETTINGS), null))
         nextMessageId++
         [outputBuffer.byteBuffers, nextMessageId]
     }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java
index d9491d79c4b..7811cdec815 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java
@@ -19,8 +19,6 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.binding.BindingContext;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.FieldNameValidator;
@@ -59,31 +57,32 @@ public ConnectionDescription getDescription() {
 
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
-            final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context) {
-        return executeEnqueuedCommandBasedProtocol(context.getSessionContext());
+                         final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
+                         final OperationContext operationContext) {
+        return executeEnqueuedCommandBasedProtocol(operationContext);
     }
 
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
-            final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context,
+            final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext,
             final boolean responseExpected, @Nullable final SplittablePayload payload,
             @Nullable final FieldNameValidator payloadFieldNameValidator) {
-        return executeEnqueuedCommandBasedProtocol(context.getSessionContext());
+        return executeEnqueuedCommandBasedProtocol(operationContext);
     }
 
     @Override
     public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
-            final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context,
+            final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext,
             final SingleResultCallback<T> callback) {
-        executeEnqueuedCommandBasedProtocolAsync(context.getSessionContext(), callback);
+        executeEnqueuedCommandBasedProtocolAsync(operationContext, callback);
     }
 
     @Override
     public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
-            final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context,
+            final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext,
             final boolean responseExpected, @Nullable final SplittablePayload payload,
             @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback<T> callback) {
-        executeEnqueuedCommandBasedProtocolAsync(context.getSessionContext(), callback);
+        executeEnqueuedCommandBasedProtocolAsync(operationContext, callback);
     }
 
     @Override
@@ -92,13 +91,14 @@ public void markAsPinned(final PinningMode pinningMode) {
     }
 
     @SuppressWarnings("unchecked")
-    private <T> T executeEnqueuedCommandBasedProtocol(final SessionContext sessionContext) {
-        return (T) executor.execute(enqueuedCommandProtocol, internalConnection, sessionContext);
+    private <T> T executeEnqueuedCommandBasedProtocol(final OperationContext operationContext) {
+        return (T) executor.execute(enqueuedCommandProtocol, internalConnection, operationContext.getSessionContext());
     }
 
     @SuppressWarnings("unchecked")
-    private <T> void executeEnqueuedCommandBasedProtocolAsync(final SessionContext sessionContext, final SingleResultCallback<T> callback) {
-        executor.executeAsync(enqueuedCommandProtocol, internalConnection, sessionContext, callback);
+    private <T> void executeEnqueuedCommandBasedProtocolAsync(final OperationContext operationContext,
+            final SingleResultCallback<T> callback) {
+        executor.executeAsync(enqueuedCommandProtocol, internalConnection, operationContext.getSessionContext(), callback);
     }
 
     void enqueueProtocol(final CommandProtocol protocol) {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java
index 479ae6ed921..008ae7bf7b7 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java
@@ -17,18 +17,15 @@
 package com.mongodb.internal.connection;
 
 import com.mongodb.MongoException;
-import com.mongodb.RequestContext;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import org.bson.ByteBuf;
 import org.bson.codecs.Decoder;
 import org.bson.types.ObjectId;
 
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 public class TestConnectionPool implements ConnectionPool {
 
@@ -48,23 +45,22 @@ public ByteBuf getBuffer(final int capacity) {
             }
 
             @Override
-            public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId) {
+            public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) {
                 throw new UnsupportedOperationException("Not implemented yet!");
             }
 
             @Override
-            public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-                    final RequestContext requestContext, final OperationContext operationContext) {
+            public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
                 throw new UnsupportedOperationException("Not implemented yet!");
             }
 
             @Override
-            public <T> void send(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext) {
+            public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
                 throw new UnsupportedOperationException();
             }
 
             @Override
-            public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext) {
+            public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) {
                 throw new UnsupportedOperationException();
             }
 
@@ -75,24 +71,24 @@ public boolean hasMoreToCome() {
 
             @Override
             public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder,
-                    final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext,
-                    final SingleResultCallback<T> callback) {
+                    final OperationContext operationContext, final SingleResultCallback<T> callback) {
                 throw new UnsupportedOperationException("Not implemented yet!");
             }
 
             @Override
-            public ResponseBuffers receiveMessage(final int responseTo) {
+            public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) {
                 throw new UnsupportedOperationException("Not implemented yet!");
             }
 
             @Override
-            public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId,
+            public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext,
                                          final SingleResultCallback<Void> callback) {
                 throw new UnsupportedOperationException("Not implemented yet!");
             }
 
             @Override
-            public void receiveMessageAsync(final int responseTo, final SingleResultCallback<ResponseBuffers> callback) {
+            public void receiveMessageAsync(final int responseTo, final OperationContext operationContext,
+                    final SingleResultCallback<ResponseBuffers> callback) {
                 throw new UnsupportedOperationException("Not implemented yet!");
             }
 
@@ -107,12 +103,12 @@ public ServerDescription getInitialServerDescription() {
             }
 
             @Override
-            public void open() {
+            public void open(final OperationContext operationContext) {
                 throw new UnsupportedOperationException("Not implemented yet");
             }
 
             @Override
-            public void openAsync(final SingleResultCallback<Void> callback) {
+            public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) {
                 callback.onResult(null, new UnsupportedOperationException("Not implemented yet"));
             }
 
@@ -138,20 +134,12 @@ public int getGeneration() {
         };
     }
 
-    @Override
-    public InternalConnection get(final OperationContext operationContext, final long timeout, final TimeUnit timeUnit) {
-        if (exceptionToThrow != null) {
-            throw exceptionToThrow;
-        }
-        return get(operationContext);
-    }
-
     @Override
     public void getAsync(final OperationContext operationContext, final SingleResultCallback<InternalConnection> callback) {
         if (exceptionToThrow != null) {
             callback.onResult(null, exceptionToThrow);
         } else {
-            callback.onResult(get(new OperationContext()), null);
+            callback.onResult(get(operationContext), null);
         }
     }
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java
index 9d8eda976d6..12008cdec93 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java
@@ -28,6 +28,9 @@
 import com.mongodb.event.ConnectionPoolListener;
 import com.mongodb.event.ConnectionPoolReadyEvent;
 import com.mongodb.event.ConnectionReadyEvent;
+import com.mongodb.internal.time.StartTime;
+import com.mongodb.internal.time.TimePointTest;
+import com.mongodb.internal.time.Timeout;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -84,6 +87,22 @@ public <T> int countEvents(final Class<T> eventClass) {
         return eventCount;
     }
 
+    public void waitForEvents(final List<Class<?>> eventClasses, final long time, final TimeUnit unit)
+            throws InterruptedException, TimeoutException {
+        Timeout timeout = StartTime.now().timeoutAfterOrInfiniteIfNegative(time, unit);
+        ArrayList<Object> seen = new ArrayList<>();
+
+        for (Class<?> eventClass : eventClasses) {
+            waitForEvent(eventClass, 1, TimePointTest.remaining(timeout, unit), unit);
+
+            if (TimePointTest.hasExpired(timeout)) {
+                throw new TimeoutException("Timed out waiting for event of type " + eventClass
+                        + ". Timing out after seeing " + seen);
+            }
+            seen.add(eventClass);
+        }
+    }
+
     public <T> void waitForEvent(final Class<T> eventClass, final int count, final long time, final TimeUnit unit)
             throws InterruptedException, TimeoutException {
         lock.lock();
@@ -106,6 +125,7 @@ public <T> void waitForEvent(final Class<T> eventClass, final int count, final l
         }
     }
 
+
     private <T> boolean containsEvent(final Class<T> eventClass, final int expectedEventCount) {
         return countEvents(eventClass) >= expectedEventCount;
     }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java
index 8e99c89c20d..2853780f93a 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java
@@ -17,14 +17,14 @@
 package com.mongodb.internal.connection;
 
 import com.mongodb.MongoException;
-import com.mongodb.RequestContext;
+import com.mongodb.ServerAddress;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ConnectionId;
+import com.mongodb.connection.ServerConnectionState;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.connection.ServerId;
 import com.mongodb.connection.ServerType;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.session.SessionContext;
 import org.bson.BsonBinaryReader;
 import org.bson.BsonDocument;
 import org.bson.ByteBuf;
@@ -55,6 +55,7 @@ private static class Interaction {
     }
 
     private final ConnectionDescription description;
+    private final ServerDescription serverDescription;
     private final BufferProvider bufferProvider;
     private final Deque<Interaction> replies;
     private final List<BsonInput> sent;
@@ -68,6 +69,10 @@ private static class Interaction {
     TestInternalConnection(final ServerId serverId, final ServerType serverType) {
         this.description = new ConnectionDescription(new ConnectionId(serverId), LATEST_WIRE_VERSION, serverType, 0, 0, 0,
                 Collections.emptyList());
+        this.serverDescription = ServerDescription.builder()
+                .address(new ServerAddress("localhost", 27017))
+                .type(serverType)
+                .state(ServerConnectionState.CONNECTED).build();
         this.bufferProvider = new SimpleBufferProvider();
 
         this.replies = new LinkedList<>();
@@ -103,15 +108,15 @@ public ConnectionDescription getDescription() {
 
     @Override
     public ServerDescription getInitialServerDescription() {
-        throw new UnsupportedOperationException();
+        return serverDescription;
     }
 
-    public void open() {
+    public void open(final OperationContext operationContext) {
         opened = true;
     }
 
     @Override
-    public void openAsync(final SingleResultCallback<Void> callback) {
+    public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) {
         opened = true;
         callback.onResult(null, null);
     }
@@ -137,7 +142,7 @@ public int getGeneration() {
     }
 
     @Override
-    public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId) {
+    public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) {
         // repackage all byte buffers into a single byte buffer...
         int totalSize = 0;
         for (ByteBuf buf : byteBuffers) {
@@ -164,30 +169,29 @@ public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId
     }
 
     @Override
-    public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-                                final RequestContext requestContext, final OperationContext operationContext) {
+    public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
         try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) {
-            message.encode(bsonOutput, sessionContext);
-            sendMessage(bsonOutput.getByteBuffers(), message.getId());
+            message.encode(bsonOutput, operationContext);
+            sendMessage(bsonOutput.getByteBuffers(), message.getId(), operationContext);
         }
-        try (ResponseBuffers responseBuffers = receiveMessage(message.getId())) {
+        try (ResponseBuffers responseBuffers = receiveMessage(message.getId(), operationContext)) {
             boolean commandOk = isCommandOk(new BsonBinaryReader(new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer())));
             responseBuffers.reset();
             if (!commandOk) {
                 throw getCommandFailureException(getResponseDocument(responseBuffers, message, new BsonDocumentCodec()),
-                        description.getServerAddress());
+                        description.getServerAddress(), operationContext.getTimeoutContext());
             }
             return new ReplyMessage<>(responseBuffers, decoder, message.getId()).getDocument();
         }
     }
 
     @Override
-    public <T> void send(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext) {
+    public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
         throw new UnsupportedOperationException();
     }
 
     @Override
-    public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext) {
+    public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) {
         throw new UnsupportedOperationException();
     }
 
@@ -204,11 +208,10 @@ private <T extends BsonDocument> T getResponseDocument(final ResponseBuffers res
     }
 
     @Override
-    public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder,
-            final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext,
+    public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext,
             final SingleResultCallback<T> callback) {
         try {
-            T result = sendAndReceive(message, decoder, sessionContext, requestContext, operationContext);
+            T result = sendAndReceive(message, decoder, operationContext);
             callback.onResult(result, null);
         } catch (MongoException ex) {
             callback.onResult(null, ex);
@@ -233,7 +236,7 @@ private ReplyHeader replaceResponseTo(final ReplyHeader header, final int respon
         return new ReplyHeader(buffer, messageHeader);    }
 
     @Override
-    public ResponseBuffers receiveMessage(final int responseTo) {
+    public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) {
         if (this.replies.isEmpty()) {
             throw new MongoException("Test was not setup properly as too many calls to receiveMessage occured.");
         }
@@ -247,9 +250,10 @@ public ResponseBuffers receiveMessage(final int responseTo) {
     }
 
     @Override
-    public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final SingleResultCallback<Void> callback) {
+    public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext,
+            final SingleResultCallback<Void> callback) {
         try {
-            sendMessage(byteBuffers, lastRequestId);
+            sendMessage(byteBuffers, lastRequestId, operationContext);
             callback.onResult(null, null);
         } catch (Exception e) {
             callback.onResult(null, e);
@@ -257,9 +261,10 @@ public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequ
     }
 
     @Override
-    public void receiveMessageAsync(final int responseTo, final SingleResultCallback<ResponseBuffers> callback) {
+    public void receiveMessageAsync(final int responseTo, final OperationContext operationContext,
+            final SingleResultCallback<ResponseBuffers> callback) {
         try {
-            ResponseBuffers buffers = receiveMessage(responseTo);
+            ResponseBuffers buffers = receiveMessage(responseTo, operationContext);
             callback.onResult(buffers, null);
         } catch (MongoException ex) {
             callback.onResult(null, ex);
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java
index 0e53c55fc03..7669eab9b91 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java
@@ -16,14 +16,12 @@
 
 package com.mongodb.internal.connection;
 
-import com.mongodb.RequestContext;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.connection.ConnectionId;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.connection.ServerId;
 import com.mongodb.connection.ServerType;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.session.SessionContext;
 import org.bson.ByteBuf;
 import org.bson.codecs.Decoder;
 
@@ -69,12 +67,12 @@ public int getGeneration() {
             return generation;
         }
 
-        public void open() {
+        public void open(final OperationContext operationContext) {
             opened = true;
         }
 
         @Override
-        public void openAsync(final SingleResultCallback<Void> callback) {
+        public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) {
             opened = true;
             callback.onResult(null, null);
         }
@@ -100,21 +98,20 @@ public ByteBuf getBuffer(final int size) {
         }
 
         @Override
-        public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId) {
+        public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) {
         }
 
         @Override
-        public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext,
-                                    final RequestContext requestContext, final OperationContext operationContext) {
+        public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
             return null;
         }
 
         @Override
-        public <T> void send(final CommandMessage message, final Decoder<T> decoder, final SessionContext sessionContext) {
+        public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
         }
 
         @Override
-        public <T> T receive(final Decoder<T> decoder, final SessionContext sessionContext) {
+        public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) {
             return null;
         }
 
@@ -125,23 +122,24 @@ public boolean hasMoreToCome() {
 
         @Override
         public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder,
-                final SessionContext sessionContext, final RequestContext requestContext, final OperationContext operationContext,
-                final SingleResultCallback<T> callback) {
+                final OperationContext operationContext, final SingleResultCallback<T> callback) {
             callback.onResult(null, null);
         }
 
         @Override
-        public ResponseBuffers receiveMessage(final int responseTo) {
+        public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) {
             return null;
         }
 
         @Override
-        public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final SingleResultCallback<Void> callback) {
+        public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext,
+                final SingleResultCallback<Void> callback) {
             callback.onResult(null, null);
         }
 
         @Override
-        public void receiveMessageAsync(final int responseTo, final SingleResultCallback<ResponseBuffers> callback) {
+        public void receiveMessageAsync(final int responseTo, final OperationContext operationContext,
+                final SingleResultCallback<ResponseBuffers> callback) {
             callback.onResult(null, null);
         }
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java
index 970bfd42ff1..6fd27893c70 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java
@@ -17,17 +17,22 @@
 package com.mongodb.internal.connection;
 
 import com.mongodb.MongoTimeoutException;
+import com.mongodb.internal.TimeoutSettings;
 
 import java.util.concurrent.CountDownLatch;
 
+import static com.mongodb.ClusterFixture.createOperationContext;
+
 class TimeoutTrackingConnectionGetter implements Runnable {
     private final ConnectionPool connectionPool;
+    private final TimeoutSettings timeoutSettings;
     private final CountDownLatch latch = new CountDownLatch(1);
 
     private volatile boolean gotTimeout;
 
-    TimeoutTrackingConnectionGetter(final ConnectionPool connectionPool) {
+    TimeoutTrackingConnectionGetter(final ConnectionPool connectionPool, final TimeoutSettings timeoutSettings) {
         this.connectionPool = connectionPool;
+        this.timeoutSettings = timeoutSettings;
     }
 
     boolean isGotTimeout() {
@@ -37,7 +42,7 @@ boolean isGotTimeout() {
     @Override
     public void run() {
         try {
-            InternalConnection connection = connectionPool.get(new OperationContext());
+            InternalConnection connection = connectionPool.get(createOperationContext(timeoutSettings));
             connection.close();
         } catch (MongoTimeoutException e) {
             gotTimeout = true;
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy
index 8eb75a44d2f..d2e5414bd56 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy
@@ -16,19 +16,18 @@
 
 package com.mongodb.internal.connection
 
-
 import com.mongodb.MongoNamespace
 import com.mongodb.ServerAddress
 import com.mongodb.async.FutureResultCallback
 import com.mongodb.connection.ClusterId
 import com.mongodb.connection.ServerId
-import com.mongodb.internal.IgnorableRequestContext
 import com.mongodb.internal.validator.NoOpFieldNameValidator
 import org.bson.BsonDocument
 import org.bson.BsonInt32
 import org.bson.codecs.BsonDocumentCodec
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.connection.ClusterConnectionMode.SINGLE
 
@@ -51,7 +50,7 @@ class UsageTrackingConnectionSpecification extends Specification {
         connection.openedAt == Long.MAX_VALUE
 
         when:
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
 
         then:
         connection.openedAt <= System.currentTimeMillis()
@@ -67,7 +66,7 @@ class UsageTrackingConnectionSpecification extends Specification {
         connection.openedAt == Long.MAX_VALUE
 
         when:
-        connection.openAsync(futureResultCallback)
+        connection.openAsync(OPERATION_CONTEXT, futureResultCallback)
         futureResultCallback.get()
 
         then:
@@ -82,7 +81,7 @@ class UsageTrackingConnectionSpecification extends Specification {
         connection.lastUsedAt == Long.MAX_VALUE
 
         when:
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
 
         then:
         connection.lastUsedAt <= System.currentTimeMillis()
@@ -98,7 +97,7 @@ class UsageTrackingConnectionSpecification extends Specification {
         connection.lastUsedAt == Long.MAX_VALUE
 
         when:
-        connection.openAsync(futureResultCallback)
+        connection.openAsync(OPERATION_CONTEXT, futureResultCallback)
         futureResultCallback.get()
 
         then:
@@ -108,11 +107,11 @@ class UsageTrackingConnectionSpecification extends Specification {
     def 'lastUsedAt should be set on sendMessage'() {
         given:
         def connection = createConnection()
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
         def openedLastUsedAt = connection.lastUsedAt
 
         when:
-        connection.sendMessage(Arrays.asList(), 1)
+        connection.sendMessage([], 1, OPERATION_CONTEXT)
 
         then:
         connection.lastUsedAt >= openedLastUsedAt
@@ -123,12 +122,12 @@ class UsageTrackingConnectionSpecification extends Specification {
     def 'lastUsedAt should be set on sendMessage asynchronously'() {
         given:
         def connection = createConnection()
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
         def openedLastUsedAt = connection.lastUsedAt
         def futureResultCallback = new FutureResultCallback<Void>()
 
         when:
-        connection.sendMessageAsync(Arrays.asList(), 1, futureResultCallback)
+        connection.sendMessageAsync([], 1, OPERATION_CONTEXT, futureResultCallback)
         futureResultCallback.get()
 
         then:
@@ -139,10 +138,10 @@ class UsageTrackingConnectionSpecification extends Specification {
     def 'lastUsedAt should be set on receiveMessage'() {
         given:
         def connection = createConnection()
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
         def openedLastUsedAt = connection.lastUsedAt
         when:
-        connection.receiveMessage(1)
+        connection.receiveMessage(1, OPERATION_CONTEXT)
 
         then:
         connection.lastUsedAt >= openedLastUsedAt
@@ -152,12 +151,12 @@ class UsageTrackingConnectionSpecification extends Specification {
     def 'lastUsedAt should be set on receiveMessage asynchronously'() {
         given:
         def connection = createConnection()
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
         def openedLastUsedAt = connection.lastUsedAt
         def futureResultCallback = new FutureResultCallback<Void>()
 
         when:
-        connection.receiveMessageAsync(1, futureResultCallback)
+        connection.receiveMessageAsync(1, OPERATION_CONTEXT, futureResultCallback)
         futureResultCallback.get()
 
         then:
@@ -168,14 +167,13 @@ class UsageTrackingConnectionSpecification extends Specification {
     def 'lastUsedAt should be set on sendAndReceive'() {
         given:
         def connection = createConnection()
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
         def openedLastUsedAt = connection.lastUsedAt
 
         when:
         connection.sendAndReceive(new CommandMessage(new MongoNamespace('test.coll'),
                 new BsonDocument('ping', new BsonInt32(1)), new NoOpFieldNameValidator(), primary(),
-                MessageSettings.builder().build(), SINGLE, null),
-                new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext())
+                MessageSettings.builder().build(), SINGLE, null), new BsonDocumentCodec(), OPERATION_CONTEXT)
 
         then:
         connection.lastUsedAt >= openedLastUsedAt
@@ -185,7 +183,7 @@ class UsageTrackingConnectionSpecification extends Specification {
     def 'lastUsedAt should be set on sendAndReceive asynchronously'() {
         given:
         def connection = createConnection()
-        connection.open()
+        connection.open(OPERATION_CONTEXT)
         def openedLastUsedAt = connection.lastUsedAt
         def futureResultCallback = new FutureResultCallback<Void>()
 
@@ -193,8 +191,7 @@ class UsageTrackingConnectionSpecification extends Specification {
         connection.sendAndReceiveAsync(new CommandMessage(new MongoNamespace('test.coll'),
                 new BsonDocument('ping', new BsonInt32(1)), new NoOpFieldNameValidator(), primary(),
                 MessageSettings.builder().build(), SINGLE, null),
-                new BsonDocumentCodec(), NoOpSessionContext.INSTANCE, IgnorableRequestContext.INSTANCE, new OperationContext(),
-                futureResultCallback)
+                new BsonDocumentCodec(), OPERATION_CONTEXT, futureResultCallback)
         futureResultCallback.get()
 
         then:
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java
index e2ea7939880..5326c8c723d 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java
@@ -32,6 +32,7 @@
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getServerApi;
 import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE;
 import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply;
@@ -56,7 +57,8 @@ public void before() {
     public void testSuccessfulAuthentication() {
         enqueueSuccessfulAuthenticationReply();
 
-        new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi()).authenticate(connection, connectionDescriptionThreeSix);
+        new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi())
+                .authenticate(connection, connectionDescriptionThreeSix, OPERATION_CONTEXT);
 
         validateMessages();
     }
@@ -67,7 +69,7 @@ public void testSuccessfulAuthenticationAsync() throws ExecutionException, Inter
 
         FutureResultCallback<Void> futureCallback = new FutureResultCallback<>();
         new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi()).authenticateAsync(connection,
-                connectionDescriptionThreeSix, futureCallback);
+                connectionDescriptionThreeSix, OPERATION_CONTEXT, futureCallback);
 
         futureCallback.get();
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java
index 92ff72fde83..a8b2d7b71d5 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java
@@ -30,8 +30,8 @@
 import org.junit.Test;
 
 import java.util.List;
-import java.util.concurrent.ExecutionException;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getServerApi;
 import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply;
 import static com.mongodb.internal.connection.MessageHelper.getApiVersionField;
@@ -58,7 +58,7 @@ public void testFailedAuthentication() {
         enqueueFailedAuthenticationReply();
 
         try {
-            subject.authenticate(connection, connectionDescription);
+            subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT);
             fail();
         } catch (MongoSecurityException e) {
             // all good
@@ -70,7 +70,7 @@ public void testFailedAuthenticationAsync() {
         enqueueFailedAuthenticationReply();
 
         FutureResultCallback<Void> futureCallback = new FutureResultCallback<>();
-        subject.authenticateAsync(connection, connectionDescription, futureCallback);
+        subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback);
 
         try {
             futureCallback.get();
@@ -92,17 +92,17 @@ private void enqueueFailedAuthenticationReply() {
     public void testSuccessfulAuthentication() {
         enqueueSuccessfulAuthenticationReply();
 
-        subject.authenticate(connection, connectionDescription);
+        subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT);
 
         validateMessages();
     }
 
     @Test
-    public void testSuccessfulAuthenticationAsync() throws ExecutionException, InterruptedException {
+    public void testSuccessfulAuthenticationAsync() {
         enqueueSuccessfulAuthenticationReply();
 
         FutureResultCallback<Void> futureCallback = new FutureResultCallback<>();
-        subject.authenticateAsync(connection, connectionDescription, futureCallback);
+        subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback);
 
         futureCallback.get();
 
@@ -117,7 +117,7 @@ public void testSpeculativeAuthentication() {
                 + "user: \"CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US\", "
                 + "mechanism: \"MONGODB-X509\", db: \"$external\"}");
         subject.setSpeculativeAuthenticateResponse(BsonDocument.parse(speculativeAuthenticateResponse));
-        subject.authenticate(connection, connectionDescription);
+        subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT);
 
         assertEquals(connection.getSent().size(), 0);
         assertEquals(expectedSpeculativeAuthenticateCommand, subject.createSpeculativeAuthenticateCommand(connection));
diff --git a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java
index a5044ee8ccf..40d33c31288 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java
@@ -16,7 +16,6 @@
 package com.mongodb.internal.mockito;
 
 import com.mongodb.internal.binding.ReadBinding;
-import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.operation.ListCollectionsOperation;
 import org.bson.BsonDocument;
 import org.bson.codecs.BsonDocumentCodec;
@@ -25,6 +24,7 @@
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.mockito.Mockito.when;
 
@@ -60,13 +60,13 @@ void mockObjectWithInsufficientStubbingDetector() {
     void stubbingWithThrowsException() {
         ReadBinding binding = Mockito.mock(ReadBinding.class,
                 new ThrowsException(new AssertionError("Unfortunately, you cannot do stubbing")));
-        assertThrows(AssertionError.class, () -> when(binding.getOperationContext()).thenReturn(new OperationContext()));
+        assertThrows(AssertionError.class, () -> when(binding.getOperationContext()).thenReturn(OPERATION_CONTEXT));
     }
 
     @Test
     void stubbingWithInsufficientStubbingDetector() {
         MongoMockito.mock(ReadBinding.class, bindingMock ->
-                when(bindingMock.getOperationContext()).thenReturn(new OperationContext())
+                when(bindingMock.getOperationContext()).thenReturn(OPERATION_CONTEXT)
         );
     }
 }
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy
index 4381e54f2e5..998c0a28b6e 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy
@@ -18,8 +18,10 @@ package com.mongodb.internal.operation
 
 import com.mongodb.MongoException
 import com.mongodb.async.FutureResultCallback
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.async.SingleResultCallback
 import com.mongodb.internal.binding.AsyncReadBinding
+import com.mongodb.internal.connection.OperationContext
 import org.bson.Document
 import spock.lang.Specification
 
@@ -31,6 +33,12 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification {
         given:
         def changeStreamOpertation = Stub(ChangeStreamOperation)
         def binding = Mock(AsyncReadBinding)
+        def operationContext = Mock(OperationContext)
+        def timeoutContext = Mock(TimeoutContext)
+        binding.getOperationContext() >> operationContext
+        operationContext.getTimeoutContext() >> timeoutContext
+        timeoutContext.hasTimeoutMS() >> hasTimeoutMS
+
         def wrapped = Mock(AsyncCommandBatchCursor)
         def callback = Stub(SingleResultCallback)
         def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null,
@@ -61,11 +69,19 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification {
         then:
         0 * wrapped.close()
         0 * binding.release()
+
+        where:
+        hasTimeoutMS << [true, false]
     }
 
     def 'should not close the cursor in next if the cursor was closed before next completed'() {
         def changeStreamOpertation = Stub(ChangeStreamOperation)
         def binding = Mock(AsyncReadBinding)
+        def operationContext = Mock(OperationContext)
+        def timeoutContext = Mock(TimeoutContext)
+        binding.getOperationContext() >> operationContext
+        operationContext.getTimeoutContext() >> timeoutContext
+        timeoutContext.hasTimeoutMS() >> hasTimeoutMS
         def wrapped = Mock(AsyncCommandBatchCursor)
         def callback = Stub(SingleResultCallback)
         def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null,
@@ -86,11 +102,19 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification {
 
         then:
         cursor.isClosed()
+
+        where:
+        hasTimeoutMS << [true, false]
     }
 
     def 'should throw a MongoException when next/tryNext is called after the cursor is closed'() {
         def changeStreamOpertation = Stub(ChangeStreamOperation)
         def binding = Mock(AsyncReadBinding)
+        def operationContext = Mock(OperationContext)
+        def timeoutContext = Mock(TimeoutContext)
+        binding.getOperationContext() >> operationContext
+        operationContext.getTimeoutContext() >> timeoutContext
+        timeoutContext.hasTimeoutMS() >> hasTimeoutMS
         def wrapped = Mock(AsyncCommandBatchCursor)
         def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null,
                 ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION)
@@ -104,6 +128,9 @@ class AsyncChangeStreamBatchCursorSpecification extends Specification {
         then:
         def exception = thrown(MongoException)
         exception.getMessage() == 'next() called after the cursor was closed.'
+
+        where:
+        hasTimeoutMS << [true, false]
     }
 
     List<Document> nextBatch(AsyncChangeStreamBatchCursor cursor) {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy
index 7ba7db42a01..4ea54c05ed0 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy
@@ -22,14 +22,17 @@ import com.mongodb.MongoNamespace
 import com.mongodb.ServerAddress
 import com.mongodb.ServerCursor
 import com.mongodb.async.FutureResultCallback
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.connection.ConnectionDescription
 import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerType
 import com.mongodb.connection.ServerVersion
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.async.SingleResultCallback
 import com.mongodb.internal.binding.AsyncConnectionSource
 import com.mongodb.internal.connection.AsyncConnection
+import com.mongodb.internal.connection.OperationContext
 import org.bson.BsonArray
 import org.bson.BsonDocument
 import org.bson.BsonInt32
@@ -51,19 +54,22 @@ class AsyncCommandBatchCursorSpecification extends Specification {
         def initialConnection = referenceCountedAsyncConnection()
         def connection = referenceCountedAsyncConnection()
         def connectionSource = getAsyncConnectionSource(connection)
-        def cursor = new AsyncCommandBatchCursor<Document>(createCommandResult([], 42), batchSize, maxTimeMS, CODEC,
-                null, connectionSource, initialConnection)
+        def timeoutContext = connectionSource.getOperationContext().getTimeoutContext()
+        def firstBatch = createCommandResult([])
         def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID))
                 .append('collection', new BsonString(NAMESPACE.getCollectionName()))
         if (batchSize != 0) {
             expectedCommand.append('batchSize', new BsonInt32(batchSize))
         }
-        if (expectedMaxTimeFieldValue != null) {
-            expectedCommand.append('maxTimeMS', new BsonInt64(expectedMaxTimeFieldValue))
-        }
 
         def reply =  getMoreResponse([], 0)
 
+        when:
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, batchSize, maxTimeMS, CODEC,
+                null, connectionSource, initialConnection)
+        then:
+        1 * timeoutContext.setMaxTimeOverride(*_)
+
         when:
         def batch = nextBatch(cursor)
 
@@ -97,7 +103,7 @@ class AsyncCommandBatchCursorSpecification extends Specification {
         def serverVersion = new ServerVersion([3, 6, 0])
         def connection = referenceCountedAsyncConnection(serverVersion)
         def connectionSource = getAsyncConnectionSource(connection)
-        def cursor = new AsyncCommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         when:
@@ -126,7 +132,8 @@ class AsyncCommandBatchCursorSpecification extends Specification {
         def connectionSource = getAsyncConnectionSource(connection)
 
         when:
-        def cursor = new AsyncCommandBatchCursor<Document>(createCommandResult(FIRST_BATCH, 0), 0, 0, CODEC,
+        def firstBatch = createCommandResult(FIRST_BATCH, 0)
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         then:
@@ -156,7 +163,7 @@ class AsyncCommandBatchCursorSpecification extends Specification {
 
         when:
         def firstBatch = createCommandResult([], CURSOR_ID)
-        def cursor = new AsyncCommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         def batch = nextBatch(cursor)
 
@@ -185,8 +192,8 @@ class AsyncCommandBatchCursorSpecification extends Specification {
         connectionSource.getCount() == 0
 
         where:
-        response             | response2
-        getMoreResponse([])  | getMoreResponse(SECOND_BATCH, 0)
+        serverVersion                | response            | response2
+        new ServerVersion([3, 6, 0]) | getMoreResponse([]) | getMoreResponse(SECOND_BATCH, 0)
     }
 
     def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() {
@@ -199,9 +206,10 @@ class AsyncCommandBatchCursorSpecification extends Specification {
 
         def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA
         def secondConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionB
+        def firstBatch = createCommandResult()
 
         when:
-        def cursor = new AsyncCommandBatchCursor<Document>(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         def batch = nextBatch(cursor)
 
@@ -255,7 +263,7 @@ class AsyncCommandBatchCursorSpecification extends Specification {
         def connectionSource = getAsyncConnectionSource(connectionA, connectionB)
 
         when:
-        def cursor = new AsyncCommandBatchCursor<Document>(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         def batch = nextBatch(cursor)
 
@@ -291,7 +299,7 @@ class AsyncCommandBatchCursorSpecification extends Specification {
         def firstBatch = createCommandResult()
 
         when:
-        def cursor = new AsyncCommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         def batch = nextBatch(cursor)
 
@@ -331,7 +339,7 @@ class AsyncCommandBatchCursorSpecification extends Specification {
         def initialConnection = referenceCountedAsyncConnection()
         def connectionSource = getAsyncConnectionSourceWithResult(ServerType.STANDALONE) { [null, MONGO_EXCEPTION] }
         def firstBatch = createCommandResult()
-        def cursor = new AsyncCommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         when:
@@ -351,7 +359,7 @@ class AsyncCommandBatchCursorSpecification extends Specification {
 
         when:
         def firstBatch = createCommandResult()
-        def cursor = new AsyncCommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         then:
@@ -378,7 +386,7 @@ class AsyncCommandBatchCursorSpecification extends Specification {
 
         when:
         def firstBatch = createCommandResult()
-        def cursor = new AsyncCommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new AsyncCommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         then:
@@ -511,6 +519,9 @@ class AsyncCommandBatchCursorSpecification extends Specification {
                     .state(ServerConnectionState.CONNECTED)
                     .build()
         }
+        OperationContext operationContext = Mock(OperationContext)
+        operationContext.getTimeoutContext() >> Mock(TimeoutContext)
+        mock.getOperationContext() >> operationContext
         mock.getConnection(_) >> {
             if (counter == 0) {
                 throw new IllegalStateException('Tried to use released AsyncConnectionSource')
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy
index f897413e12d..2e99b61efdf 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy
@@ -16,7 +16,6 @@
 
 package com.mongodb.internal.operation
 
-
 import com.mongodb.MongoWriteConcernException
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
@@ -36,6 +35,7 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.Decoder
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync
 import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync
@@ -54,7 +54,7 @@ class AsyncOperationHelperSpecification extends Specification {
             getMaxWireVersion() >> getMaxWireVersionForServerVersion([4, 0, 0])
             getServerType() >> ServerType.REPLICA_SET_PRIMARY
         }
-        def commandCreator = { serverDesc, connectionDesc -> command }
+        def commandCreator = { csot, serverDesc, connectionDesc -> command }
         def callback = new SingleResultCallback() {
             def result
             def throwable
@@ -73,24 +73,26 @@ class AsyncOperationHelperSpecification extends Specification {
             _ * getDescription() >> connectionDescription
         }
 
+        def operationContext = OPERATION_CONTEXT.withSessionContext(
+                Stub(SessionContext) {
+                    hasSession() >> true
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.DEFAULT
+                })
         def connectionSource = Stub(AsyncConnectionSource) {
-            getServerApi() >> null
             getConnection(_) >> { it[0].onResult(connection, null) }
-            _ * getServerDescription() >> serverDescription
+            getServerDescription() >> serverDescription
+            getOperationContext() >> operationContext
         }
         def asyncWriteBinding = Stub(AsyncWriteBinding) {
-            getServerApi() >> null
             getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> false
-                getReadConcern() >> ReadConcern.DEFAULT
-            }
+            getOperationContext() >> operationContext
         }
 
         when:
-        executeRetryableWriteAsync(asyncWriteBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder,
-                commandCreator, FindAndModifyHelper.asyncTransformer(), { cmd -> cmd }, callback)
+        executeRetryableWriteAsync(asyncWriteBinding, dbName, primary(),
+                new NoOpFieldNameValidator(), decoder, commandCreator, FindAndModifyHelper.asyncTransformer(),
+                { cmd -> cmd }, callback)
 
         then:
         2 * connection.commandAsync(dbName, command, _, primary(), decoder, *_) >> { it.last().onResult(results.poll(), null) }
@@ -107,11 +109,9 @@ class AsyncOperationHelperSpecification extends Specification {
         def callback = Stub(SingleResultCallback)
         def connection = Mock(AsyncConnection)
         def connectionSource = Stub(AsyncConnectionSource) {
-            getServerApi() >> null
             getConnection(_) >> { it[0].onResult(connection, null) }
         }
         def asyncWriteBinding = Stub(AsyncWriteBinding) {
-            getServerApi() >> null
             getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
         }
         def connectionDescription = Stub(ConnectionDescription)
@@ -129,18 +129,18 @@ class AsyncOperationHelperSpecification extends Specification {
         given:
         def dbName = 'db'
         def command = new BsonDocument('fakeCommandName', BsonNull.VALUE)
-        def commandCreator = { serverDescription, connectionDescription -> command }
+        def commandCreator = { csot, serverDescription, connectionDescription -> command }
         def decoder = Stub(Decoder)
         def callback = Stub(SingleResultCallback)
         def function = Stub(CommandReadTransformerAsync)
         def connection = Mock(AsyncConnection)
         def connectionSource = Stub(AsyncConnectionSource) {
-            getServerApi() >> null
+            getOperationContext() >> OPERATION_CONTEXT
             getConnection(_) >> { it[0].onResult(connection, null) }
             getReadPreference() >> readPreference
         }
         def asyncReadBinding = Stub(AsyncReadBinding) {
-            getServerApi() >> null
+            getOperationContext() >> OPERATION_CONTEXT
             getReadConnectionSource(_)  >> { it[0].onResult(connectionSource, null) }
         }
         def connectionDescription = Stub(ConnectionDescription)
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy
index c7e1a0d4363..2ccd3513cf7 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy
@@ -31,10 +31,14 @@ import com.mongodb.connection.ConnectionId
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerId
 import com.mongodb.connection.ServerType
+import com.mongodb.internal.IgnorableRequestContext
+import com.mongodb.internal.TimeoutContext
+import com.mongodb.internal.TimeoutSettings
 import com.mongodb.internal.bulk.DeleteRequest
 import com.mongodb.internal.bulk.InsertRequest
 import com.mongodb.internal.bulk.UpdateRequest
 import com.mongodb.internal.bulk.WriteRequest
+import com.mongodb.internal.connection.OperationContext
 import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext
 import org.bson.BsonDocument
 import org.bson.BsonInt32
@@ -45,6 +49,7 @@ import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE
 import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE
 
 class BulkWriteBatchSpecification extends Specification {
+    private static final TimeoutContext TIMEOUT_CONTEXT = new TimeoutContext(new TimeoutSettings(0, 0, 0, 0, 0))
     def namespace = new MongoNamespace('db.coll')
     def serverDescription = ServerDescription.builder().address(new ServerAddress()).state(CONNECTED)
             .logicalSessionTimeoutMinutes(30)
@@ -53,11 +58,12 @@ class BulkWriteBatchSpecification extends Specification {
             new ConnectionId(new ServerId(new ClusterId(), serverDescription.getAddress())), 6,
             ServerType.REPLICA_SET_PRIMARY, 1000, 16000, 48000, [])
     def sessionContext = new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT)
+    def operationContext = new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, TIMEOUT_CONTEXT, null)
 
     def 'should split payloads by type when ordered'() {
         when:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true,
-                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests(), sessionContext, null, null)
+                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests(), operationContext, null, null)
         def payload = bulkWriteBatch.getPayload()
         payload.setPosition(payload.size())
 
@@ -137,7 +143,7 @@ class BulkWriteBatchSpecification extends Specification {
     def 'should group payloads by type when unordered'() {
         when:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false,
-                WriteConcern.MAJORITY, true, false, getWriteRequests(), sessionContext, null, null)
+                WriteConcern.MAJORITY, true, false, getWriteRequests(), operationContext, null, null)
         def payload = bulkWriteBatch.getPayload()
         payload.setPosition(payload.size())
 
@@ -189,7 +195,7 @@ class BulkWriteBatchSpecification extends Specification {
     def 'should split payloads if only payload partially processed'() {
         when:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false,
-                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..3], sessionContext, null, null)
+                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..3], operationContext, null, null)
         def payload = bulkWriteBatch.getPayload()
         payload.setPosition(1)
 
@@ -237,7 +243,7 @@ class BulkWriteBatchSpecification extends Specification {
                  new InsertRequest(toBsonDocument('{_id: 1}')),
                  new InsertRequest(toBsonDocument('{_id: 2}'))
                 ],
-                sessionContext, null, null)
+                operationContext, null, null)
         def payload = bulkWriteBatch.getPayload()
         payload.setPosition(1)
         payload.insertedIds.put(0, new BsonInt32(0))
@@ -278,7 +284,7 @@ class BulkWriteBatchSpecification extends Specification {
                  new InsertRequest(toBsonDocument('{_id: 1}')),
                  new InsertRequest(toBsonDocument('{_id: 2}'))
                 ],
-                sessionContext, null, null)
+                operationContext, null, null)
         def payload = bulkWriteBatch.getPayload()
         payload.setPosition(3)
         payload.insertedIds.put(0, new BsonInt32(0))
@@ -300,7 +306,7 @@ class BulkWriteBatchSpecification extends Specification {
         when:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false,
                 WriteConcern.ACKNOWLEDGED, null, true,
-                [new DeleteRequest(new BsonDocument()).multi(true), new InsertRequest(new BsonDocument())], sessionContext, null, null)
+                [new DeleteRequest(new BsonDocument()).multi(true), new InsertRequest(new BsonDocument())], operationContext, null, null)
 
         then:
         !bulkWriteBatch.getRetryWrites()
@@ -309,7 +315,7 @@ class BulkWriteBatchSpecification extends Specification {
     def 'should handle operation responses'() {
         given:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true,
-                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[1..1], sessionContext, null, null)
+                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[1..1], operationContext, null, null)
         def writeConcernError = toBsonDocument('{ok: 1, n: 1, upserted: [{_id: 2, index: 0}]}')
 
         when:
@@ -324,7 +330,7 @@ class BulkWriteBatchSpecification extends Specification {
     def 'should handle writeConcernError error responses'() {
         given:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true,
-                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], sessionContext, null, null)
+                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], operationContext, null, null)
         def writeConcernError = toBsonDocument('{n: 1, writeConcernError: {code: 75, errmsg: "wtimeout", errInfo: {wtimeout: "0"}}}')
 
         when:
@@ -340,7 +346,7 @@ class BulkWriteBatchSpecification extends Specification {
     def 'should handle writeErrors error responses'() {
         given:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true,
-                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], sessionContext, null, null)
+                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], operationContext, null, null)
         def writeError = toBsonDocument('''{"ok": 0, "n": 1, "code": 65, "errmsg": "bulk op errors",
             "writeErrors": [{ "index" : 0, "code" : 100, "errmsg": "some error"}] }''')
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java
new file mode 100644
index 00000000000..48c3a50e79a
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.operation;
+
+import com.mongodb.MongoException;
+import com.mongodb.MongoNotPrimaryException;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.binding.ConnectionSource;
+import com.mongodb.internal.binding.ReadBinding;
+import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.Document;
+import org.bson.RawBsonDocument;
+import org.bson.codecs.DocumentCodec;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.clearInvocations;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoInteractions;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
+final class ChangeStreamBatchCursorTest {
+
+    private static final List<RawBsonDocument> RESULT_FROM_NEW_CURSOR = new ArrayList<>();
+    private final int maxWireVersion = ServerVersionHelper.SIX_DOT_ZERO_WIRE_VERSION;
+    private ServerDescription serverDescription;
+    private TimeoutContext timeoutContext;
+    private OperationContext operationContext;
+    private Connection connection;
+    private ConnectionSource connectionSource;
+    private ReadBinding readBinding;
+    private BsonDocument resumeToken;
+    private CommandBatchCursor<RawBsonDocument> commandBatchCursor;
+    private CommandBatchCursor<RawBsonDocument> newCommandBatchCursor;
+    private ChangeStreamBatchCursor<Document> newChangeStreamCursor;
+    private ChangeStreamOperation<Document> changeStreamOperation;
+
+    @Test
+    @DisplayName("should return result on next")
+    void shouldReturnResultOnNext() {
+        when(commandBatchCursor.next()).thenReturn(RESULT_FROM_NEW_CURSOR);
+        ChangeStreamBatchCursor<Document> cursor = createChangeStreamCursor();
+
+        //when
+        List<Document> next = cursor.next();
+
+        //then
+        assertEquals(RESULT_FROM_NEW_CURSOR, next);
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verify(commandBatchCursor, times(1)).next();
+        verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        verifyNoMoreInteractions(commandBatchCursor);
+        verify(changeStreamOperation, times(1)).getDecoder();
+        verifyNoMoreInteractions(changeStreamOperation);
+    }
+
+    @Test
+    @DisplayName("should throw timeout exception without resume attempt on next")
+    void shouldThrowTimeoutExceptionWithoutResumeAttemptOnNext() {
+        when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout"));
+        ChangeStreamBatchCursor<Document> cursor = createChangeStreamCursor();
+        //when
+        assertThrows(MongoOperationTimeoutException.class, cursor::next);
+
+        //then
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verify(commandBatchCursor, times(1)).next();
+        verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        verifyNoMoreInteractions(commandBatchCursor);
+        verifyNoResumeAttemptCalled();
+    }
+
+    @Test
+    @DisplayName("should perform resume attempt on next when resumable error is thrown")
+    void shouldPerformResumeAttemptOnNextWhenResumableErrorIsThrown() {
+        when(commandBatchCursor.next()).thenThrow(new MongoNotPrimaryException(new BsonDocument(), new ServerAddress()));
+        ChangeStreamBatchCursor<Document> cursor = createChangeStreamCursor();
+        //when
+        List<Document> next = cursor.next();
+
+        //then
+        assertEquals(RESULT_FROM_NEW_CURSOR, next);
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verify(commandBatchCursor, times(1)).next();
+        verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        verifyResumeAttemptCalled();
+        verify(changeStreamOperation, times(1)).getDecoder();
+        verify(newCommandBatchCursor, times(1)).next();
+        verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        verifyNoMoreInteractions(newCommandBatchCursor);
+        verifyNoMoreInteractions(changeStreamOperation);
+    }
+
+
+    @Test
+    @DisplayName("should resume only once on subsequent calls after timeout error")
+    void shouldResumeOnlyOnceOnSubsequentCallsAfterTimeoutError() {
+        when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout"));
+        ChangeStreamBatchCursor<Document> cursor = createChangeStreamCursor();
+        //when
+        assertThrows(MongoOperationTimeoutException.class, cursor::next);
+
+        //then
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verify(commandBatchCursor, times(1)).next();
+        verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        verifyNoMoreInteractions(commandBatchCursor);
+        verifyNoResumeAttemptCalled();
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding);
+
+        //when second next is called. Resume is attempted.
+        List<Document> next = cursor.next();
+
+        //then
+        assertEquals(Collections.emptyList(), next);
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verify(commandBatchCursor, times(1)).close();
+        verifyNoMoreInteractions(commandBatchCursor);
+        verify(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion);
+        verify(changeStreamOperation, times(1)).getDecoder();
+        verify(changeStreamOperation, times(1)).execute(readBinding);
+        verifyNoMoreInteractions(changeStreamOperation);
+        verify(newCommandBatchCursor, times(1)).next();
+        verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding);
+
+        //when third next is called. No resume is attempted.
+        List<Document> next2 = cursor.next();
+
+        //then
+        assertEquals(Collections.emptyList(), next2);
+        verifyNoInteractions(commandBatchCursor);
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verify(newCommandBatchCursor, times(1)).next();
+        verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        verifyNoMoreInteractions(newCommandBatchCursor);
+        verify(changeStreamOperation, times(1)).getDecoder();
+        verifyNoMoreInteractions(changeStreamOperation);
+        verifyNoInteractions(readBinding);
+        verifyNoMoreInteractions(changeStreamOperation);
+    }
+
+    @Test
+    @DisplayName("should propagate any errors occurred in aggregate operation during creating new change stream when previous next timed out")
+    void shouldPropagateAnyErrorsOccurredInAggregateOperation() {
+        when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout"));
+        MongoNotPrimaryException resumableError = new MongoNotPrimaryException(new BsonDocument(), new ServerAddress());
+        when(changeStreamOperation.execute(readBinding)).thenThrow(resumableError);
+
+        ChangeStreamBatchCursor<Document> cursor = createChangeStreamCursor();
+        //when
+        assertThrows(MongoOperationTimeoutException.class, cursor::next);
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding);
+        assertThrows(MongoNotPrimaryException.class, cursor::next);
+
+        //then
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verifyResumeAttemptCalled();
+        verifyNoMoreInteractions(changeStreamOperation);
+        verifyNoInteractions(newCommandBatchCursor);
+    }
+
+
+    @Test
+    @DisplayName("should perform a resume attempt in subsequent next call when previous resume attempt in next timed out")
+    void shouldResumeAfterTimeoutInAggregateOnNextCall() {
+        //given
+        ChangeStreamBatchCursor<Document> cursor = createChangeStreamCursor();
+
+        //first next operation times out on getMore
+        when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout during next call"));
+        assertThrows(MongoOperationTimeoutException.class, cursor::next);
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding);
+
+        //second next operation times out on resume attempt while creating change stream
+        when(changeStreamOperation.execute(readBinding)).thenThrow(new MongoOperationTimeoutException("timeout during resumption"));
+        assertThrows(MongoOperationTimeoutException.class, cursor::next);
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation);
+
+        doReturn(newChangeStreamCursor).when(changeStreamOperation).execute(readBinding);
+
+        //when third operation successfully resumes and calls next
+        List<Document> next = cursor.next();
+
+        //then
+        assertEquals(RESULT_FROM_NEW_CURSOR, next);
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+
+        verifyResumeAttemptCalled();
+        verify(changeStreamOperation, times(1)).getDecoder();
+        verifyNoMoreInteractions(changeStreamOperation);
+
+        verify(newCommandBatchCursor, times(1)).next();
+        verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken();
+        verifyNoMoreInteractions(newCommandBatchCursor);
+    }
+
+    @Test
+    @DisplayName("should close change stream when resume operation fails due to non-timeout error")
+    void shouldCloseChangeStreamWhenResumeOperationFailsDueToNonTimeoutError() {
+        //given
+        ChangeStreamBatchCursor<Document> cursor = createChangeStreamCursor();
+
+        //first next operation times out on getMore
+        when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout during next call"));
+        assertThrows(MongoOperationTimeoutException.class, cursor::next);
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding);
+
+        //when second next operation errors on resume attempt while creating change stream
+        when(changeStreamOperation.execute(readBinding)).thenThrow(new MongoNotPrimaryException(new BsonDocument(), new ServerAddress()));
+        assertThrows(MongoNotPrimaryException.class, cursor::next);
+
+        //then
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verifyResumeAttemptCalled();
+        verifyNoMoreInteractions(changeStreamOperation);
+        verifyNoInteractions(newCommandBatchCursor);
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding);
+
+
+        //when third next operation errors with cursor closed exception
+        doThrow(new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR)).when(commandBatchCursor).next();
+        MongoException mongoException = assertThrows(MongoException.class, cursor::next);
+
+        //then
+        assertEquals(MESSAGE_IF_CLOSED_AS_CURSOR, mongoException.getMessage());
+        verify(timeoutContext, times(1)).resetTimeoutIfPresent();
+        verifyNoResumeAttemptCalled();
+    }
+
+    private ChangeStreamBatchCursor<Document> createChangeStreamCursor() {
+        ChangeStreamBatchCursor<Document> cursor =
+                new ChangeStreamBatchCursor<>(changeStreamOperation, commandBatchCursor, readBinding, null, maxWireVersion);
+        clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding);
+        return cursor;
+    }
+
+    private void verifyNoResumeAttemptCalled() {
+        verifyNoInteractions(changeStreamOperation);
+        verifyNoInteractions(newCommandBatchCursor);
+        verifyNoInteractions(readBinding);
+    }
+
+
+    private void verifyResumeAttemptCalled() {
+        verify(commandBatchCursor, times(1)).close();
+        verify(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion);
+        verify(changeStreamOperation, times(1)).execute(readBinding);
+        verifyNoMoreInteractions(commandBatchCursor);
+    }
+
+    @BeforeEach
+    @SuppressWarnings("unchecked")
+    void setUp() {
+        resumeToken = new BsonDocument("_id", new BsonInt32(1));
+        serverDescription = mock(ServerDescription.class);
+        when(serverDescription.getMaxWireVersion()).thenReturn(maxWireVersion);
+
+        timeoutContext = mock(TimeoutContext.class);
+        when(timeoutContext.hasTimeoutMS()).thenReturn(true);
+        doNothing().when(timeoutContext).resetTimeoutIfPresent();
+
+        operationContext = mock(OperationContext.class);
+        when(operationContext.getTimeoutContext()).thenReturn(timeoutContext);
+        connection = mock(Connection.class);
+        when(connection.command(any(), any(), any(), any(), any(), any())).thenReturn(null);
+        connectionSource = mock(ConnectionSource.class);
+        when(connectionSource.getConnection()).thenReturn(connection);
+        when(connectionSource.release()).thenReturn(1);
+        when(connectionSource.getServerDescription()).thenReturn(serverDescription);
+
+        readBinding = mock(ReadBinding.class);
+        when(readBinding.getOperationContext()).thenReturn(operationContext);
+        when(readBinding.retain()).thenReturn(readBinding);
+        when(readBinding.release()).thenReturn(1);
+        when(readBinding.getReadConnectionSource()).thenReturn(connectionSource);
+
+
+        commandBatchCursor = mock(CommandBatchCursor.class);
+        when(commandBatchCursor.getPostBatchResumeToken()).thenReturn(resumeToken);
+        doNothing().when(commandBatchCursor).close();
+
+        newCommandBatchCursor = mock(CommandBatchCursor.class);
+        when(newCommandBatchCursor.getPostBatchResumeToken()).thenReturn(resumeToken);
+        when(newCommandBatchCursor.next()).thenReturn(RESULT_FROM_NEW_CURSOR);
+        doNothing().when(newCommandBatchCursor).close();
+
+        newChangeStreamCursor = mock(ChangeStreamBatchCursor.class);
+        when(newChangeStreamCursor.getWrapped()).thenReturn(newCommandBatchCursor);
+
+        changeStreamOperation = mock(ChangeStreamOperation.class);
+        when(changeStreamOperation.getDecoder()).thenReturn(new DocumentCodec());
+        doNothing().when(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion);
+        when(changeStreamOperation.execute(readBinding)).thenReturn(newChangeStreamCursor);
+    }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy
index 38496f02552..72e9e135b42 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy
@@ -23,13 +23,16 @@ import com.mongodb.MongoSocketException
 import com.mongodb.MongoSocketOpenException
 import com.mongodb.ServerAddress
 import com.mongodb.ServerCursor
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.connection.ConnectionDescription
 import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerType
 import com.mongodb.connection.ServerVersion
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.binding.ConnectionSource
 import com.mongodb.internal.connection.Connection
+import com.mongodb.internal.connection.OperationContext
 import org.bson.BsonArray
 import org.bson.BsonDocument
 import org.bson.BsonInt32
@@ -51,21 +54,24 @@ class CommandBatchCursorSpecification extends Specification {
         def initialConnection = referenceCountedConnection()
         def connection = referenceCountedConnection()
         def connectionSource = getConnectionSource(connection)
+        def timeoutContext = connectionSource.getOperationContext().getTimeoutContext()
 
         def firstBatch = createCommandResult([])
-        def cursor = new CommandBatchCursor<Document>(firstBatch, batchSize, maxTimeMS, CODEC,
-                null, connectionSource, initialConnection)
         def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID))
                 .append('collection', new BsonString(NAMESPACE.getCollectionName()))
         if (batchSize != 0) {
             expectedCommand.append('batchSize', new BsonInt32(batchSize))
         }
-        if (expectedMaxTimeFieldValue != null) {
-            expectedCommand.append('maxTimeMS', new BsonInt64(expectedMaxTimeFieldValue))
-        }
 
         def reply =  getMoreResponse([], 0)
 
+        when:
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, batchSize, maxTimeMS, CODEC,
+                null, connectionSource, initialConnection)
+
+        then:
+        1 * timeoutContext.setMaxTimeOverride(*_)
+
         when:
         cursor.hasNext()
 
@@ -96,7 +102,7 @@ class CommandBatchCursorSpecification extends Specification {
         def serverVersion = new ServerVersion([3, 6, 0])
         def connection = referenceCountedConnection(serverVersion)
         def connectionSource = getConnectionSource(connection)
-        def cursor = new CommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         when:
@@ -124,7 +130,7 @@ class CommandBatchCursorSpecification extends Specification {
 
         when:
         def firstBatch = createCommandResult(FIRST_BATCH, 0)
-        def cursor = new CommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         then:
@@ -148,7 +154,7 @@ class CommandBatchCursorSpecification extends Specification {
 
         when:
         def firstBatch = createCommandResult([], CURSOR_ID)
-        def cursor = new CommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         def batch = cursor.next()
 
@@ -202,7 +208,7 @@ class CommandBatchCursorSpecification extends Specification {
         def firstBatch = createCommandResult()
 
         when:
-        CommandBatchCursor<Document> cursor = new CommandBatchCursor<>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         List<Document> batch = cursor.next()
 
@@ -254,7 +260,7 @@ class CommandBatchCursorSpecification extends Specification {
         def connectionSource = getConnectionSource(connectionA, connectionB)
 
         when:
-        def cursor = new CommandBatchCursor<Document>(createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         def batch = cursor.next()
 
@@ -290,7 +296,7 @@ class CommandBatchCursorSpecification extends Specification {
         def firstBatch = createCommandResult()
 
         when:
-        def cursor = new CommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
         def batch = cursor.next()
 
@@ -329,7 +335,7 @@ class CommandBatchCursorSpecification extends Specification {
         def initialConnection = referenceCountedConnection()
         def connectionSource = getConnectionSourceWithResult(ServerType.STANDALONE) { throw MONGO_EXCEPTION }
         def firstBatch = createCommandResult()
-        def cursor = new CommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         when:
@@ -350,7 +356,7 @@ class CommandBatchCursorSpecification extends Specification {
 
         when:
         def firstBatch = createCommandResult()
-        def cursor = new CommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         then:
@@ -377,7 +383,7 @@ class CommandBatchCursorSpecification extends Specification {
 
         when:
         def firstBatch = createCommandResult()
-        def cursor = new CommandBatchCursor<Document>(firstBatch, 0, 0, CODEC,
+        def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC,
                 null, connectionSource, initialConnection)
 
         then:
@@ -437,7 +443,7 @@ class CommandBatchCursorSpecification extends Specification {
         connectionSource.retain() >> connectionSource
 
         def initialResults = createCommandResult([])
-        def cursor = new CommandBatchCursor<Document>(initialResults, 2, 100, new DocumentCodec(),
+        def cursor = new CommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, initialResults, 2, 100, CODEC,
                 null, connectionSource, initialConnection)
 
         when:
@@ -463,7 +469,7 @@ class CommandBatchCursorSpecification extends Specification {
         connectionSource.retain() >> connectionSource
 
         def initialResults = createCommandResult([])
-        def cursor = new CommandBatchCursor<Document>(initialResults, 2, 100, new DocumentCodec(),
+        def cursor = new CommandBatchCursor<Document>(TimeoutMode.CURSOR_LIFETIME, initialResults, 2, 100, CODEC,
                 null, connectionSource, initialConnection)
 
         when:
@@ -563,6 +569,9 @@ class CommandBatchCursorSpecification extends Specification {
                     .state(ServerConnectionState.CONNECTED)
                     .build()
         }
+        OperationContext operationContext = Mock(OperationContext)
+        operationContext.getTimeoutContext() >> Mock(TimeoutContext)
+        mock.getOperationContext() >> operationContext
         mock.getConnection() >> {
             if (counter == 0) {
                 throw new IllegalStateException('Tried to use released ConnectionSource')
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java
new file mode 100644
index 00000000000..3380785bd70
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation;
+
+
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketException;
+import com.mongodb.ServerAddress;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.connection.ServerType;
+import com.mongodb.connection.ServerVersion;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.binding.ConnectionSource;
+import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.codecs.Decoder;
+import org.bson.codecs.DocumentCodec;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+class CommandBatchCursorTest {
+
+    private static final MongoNamespace NAMESPACE = new MongoNamespace("test", "test");
+    private static final BsonInt64 CURSOR_ID = new BsonInt64(1);
+    private static final BsonDocument COMMAND_CURSOR_DOCUMENT = new BsonDocument("ok", new BsonInt32(1))
+            .append("cursor",
+                    new BsonDocument("ns", new BsonString(NAMESPACE.getFullName()))
+                            .append("id", CURSOR_ID)
+                            .append("firstBatch", new BsonArrayWrapper<>(new BsonArray())));
+
+    private static final Decoder<Document> DOCUMENT_CODEC = new DocumentCodec();
+
+
+    private Connection mockConnection;
+    private ConnectionDescription mockDescription;
+    private ConnectionSource connectionSource;
+    private OperationContext operationContext;
+    private TimeoutContext timeoutContext;
+    private ServerDescription serverDescription;
+
+    @BeforeEach
+    void setUp() {
+        ServerVersion serverVersion = new ServerVersion(3, 6);
+
+        mockConnection = mock(Connection.class, "connection");
+        mockDescription = mock(ConnectionDescription.class);
+        when(mockDescription.getMaxWireVersion()).thenReturn(getMaxWireVersionForServerVersion(serverVersion.getVersionList()));
+        when(mockDescription.getServerType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(mockConnection.getDescription()).thenReturn(mockDescription);
+        when(mockConnection.retain()).thenReturn(mockConnection);
+
+        connectionSource = mock(ConnectionSource.class);
+        operationContext = mock(OperationContext.class);
+        timeoutContext = mock(TimeoutContext.class);
+        serverDescription = mock(ServerDescription.class);
+        when(operationContext.getTimeoutContext()).thenReturn(timeoutContext);
+        when(connectionSource.getOperationContext()).thenReturn(operationContext);
+        when(connectionSource.getConnection()).thenReturn(mockConnection);
+        when(connectionSource.getServerDescription()).thenReturn(serverDescription);
+    }
+
+
+    @Test
+    void shouldSkipKillsCursorsCommandWhenNetworkErrorOccurs() {
+        //given
+        when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow(
+                new MongoSocketException("test", new ServerAddress()));
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+
+        CommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+        //when
+        Assertions.assertThrows(MongoSocketException.class, commandBatchCursor::next);
+
+        //then
+        commandBatchCursor.close();
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any());
+    }
+
+    private CommandBatchCursor<Document> createBatchCursor() {
+        return new CommandBatchCursor<>(
+                TimeoutMode.CURSOR_LIFETIME,
+                COMMAND_CURSOR_DOCUMENT,
+                0,
+                0,
+                DOCUMENT_CODEC,
+                null,
+                connectionSource,
+                mockConnection);
+    }
+
+    @Test
+    void shouldNotSkipKillsCursorsCommandWhenTimeoutExceptionDoesNotHaveNetworkErrorCause() {
+        //given
+        when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow(
+                new MongoOperationTimeoutException("test"));
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(timeoutContext.hasTimeoutMS()).thenReturn(true);
+
+        CommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+
+        //when
+        Assertions.assertThrows(MongoOperationTimeoutException.class, commandBatchCursor::next);
+
+        commandBatchCursor.close();
+
+
+        //then
+        verify(mockConnection, times(2)).command(any(),
+                any(), any(), any(), any(), any());
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any());
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any());
+    }
+
+    @Test
+    void shouldSkipKillsCursorsCommandWhenTimeoutExceptionHasNetworkErrorCause() {
+        //given
+        when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow(
+                new MongoOperationTimeoutException("test", new MongoSocketException("test", new ServerAddress())));
+        when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER);
+        when(timeoutContext.hasTimeoutMS()).thenReturn(true);
+
+        CommandBatchCursor<Document> commandBatchCursor = createBatchCursor();
+
+        //when
+        Assertions.assertThrows(MongoOperationTimeoutException.class, commandBatchCursor::next);
+        commandBatchCursor.close();
+
+        //then
+        verify(mockConnection, times(1)).command(any(),
+                any(), any(), any(), any(), any());
+        verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any());
+        verify(mockConnection, never()).command(eq(NAMESPACE.getDatabaseName()),
+                argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any());
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy
index dc17329ae91..21ae1c4dfb9 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy
@@ -18,20 +18,25 @@ package com.mongodb.internal.operation
 
 import com.mongodb.MongoException
 import com.mongodb.MongoTimeoutException
+import com.mongodb.ReadConcern
 import com.mongodb.WriteConcern
 import com.mongodb.async.FutureResultCallback
 import com.mongodb.internal.binding.AsyncWriteBinding
 import com.mongodb.internal.binding.WriteBinding
 import com.mongodb.internal.session.SessionContext
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
 class CommitTransactionOperationUnitSpecification extends OperationUnitSpecification {
     def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException'() {
         given:
+        def sessionContext = Stub(SessionContext) {
+            getReadConcern() >> ReadConcern.DEFAULT
+            hasActiveTransaction() >> true
+        }
         def writeBinding = Stub(WriteBinding) {
             getWriteConnectionSource() >> { throw new MongoTimeoutException('Time out!') }
-            getSessionContext() >> Stub(SessionContext) {
-                hasActiveTransaction() >> true
-            }
+            getOperationContext() >> OPERATION_CONTEXT.withSessionContext(sessionContext)
         }
         def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED)
 
@@ -45,13 +50,15 @@ class CommitTransactionOperationUnitSpecification extends OperationUnitSpecifica
 
     def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException asynchronously'() {
         given:
+        def sessionContext = Stub(SessionContext) {
+            getReadConcern() >> ReadConcern.DEFAULT
+            hasActiveTransaction() >> true
+        }
         def writeBinding = Stub(AsyncWriteBinding) {
             getWriteConnectionSource(_) >> {
                 it[0].onResult(null, new MongoTimeoutException('Time out!'))
             }
-            getSessionContext() >> Stub(SessionContext) {
-                hasActiveTransaction() >> true
-            }
+            getOperationContext() >> OPERATION_CONTEXT.withSessionContext(sessionContext)
         }
         def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED)
         def callback = new FutureResultCallback()
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java
index 15a8bd972f1..d631daf2e21 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java
@@ -15,8 +15,10 @@
  */
 package com.mongodb.internal.operation;
 
+import com.mongodb.ClusterFixture;
 import com.mongodb.MongoNamespace;
 import com.mongodb.ServerCursor;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.internal.binding.AsyncConnectionSource;
 import com.mongodb.internal.binding.ReferenceCounted;
 import com.mongodb.internal.connection.Connection;
@@ -30,6 +32,8 @@ final class CursorResourceManagerTest {
     @Test
     void doubleCloseExecutedConcurrentlyWithOperationBeingInProgressShouldNotFail() {
         CursorResourceManager<?, ?> cursorResourceManager = new CursorResourceManager<ReferenceCounted, ReferenceCounted>(
+                ClusterFixture.OPERATION_CONTEXT.getTimeoutContext(),
+                TimeoutMode.CURSOR_LIFETIME,
                 new MongoNamespace("db", "coll"),
                 MongoMockito.mock(AsyncConnectionSource.class, mock -> {
                     when(mock.retain()).thenReturn(mock);
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy
index b2bd9019ef5..021b392593c 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy
@@ -28,7 +28,6 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.DocumentCodec
 
 import static com.mongodb.CursorType.TailableAwait
-import static java.util.concurrent.TimeUnit.MILLISECONDS
 
 class FindOperationUnitSpecification extends OperationUnitSpecification {
 
@@ -41,7 +40,8 @@ class FindOperationUnitSpecification extends OperationUnitSpecification {
         testOperation(operation, [3, 2, 0], expectedCommand, async, commandResult)
         // Overrides
         when:
-        operation.filter(new BsonDocument('a', BsonBoolean.TRUE))
+        operation = new FindOperation<BsonDocument>(namespace, new BsonDocumentCodec())
+                .filter(new BsonDocument('a', BsonBoolean.TRUE))
                 .projection(new BsonDocument('x', new BsonInt32(1)))
                 .skip(2)
                 .limit(limit)
@@ -49,7 +49,7 @@ class FindOperationUnitSpecification extends OperationUnitSpecification {
                 .cursorType(TailableAwait)
                 .noCursorTimeout(true)
                 .partial(true)
-                .maxTime(10, MILLISECONDS)
+
                 .comment(new BsonString('my comment'))
                 .hint(BsonDocument.parse('{ hint : 1}'))
                 .min(BsonDocument.parse('{ abc: 99 }'))
@@ -68,7 +68,6 @@ class FindOperationUnitSpecification extends OperationUnitSpecification {
                 .append('awaitData', BsonBoolean.TRUE)
                 .append('allowPartialResults', BsonBoolean.TRUE)
                 .append('noCursorTimeout', BsonBoolean.TRUE)
-                .append('maxTimeMS', new BsonInt64(operation.getMaxTime(MILLISECONDS)))
                 .append('comment', operation.getComment())
                 .append('hint', operation.getHint())
                 .append('min', operation.getMin())
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java
index 4a4654b38a1..12a964db625 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java
@@ -27,7 +27,6 @@
 import com.mongodb.internal.binding.ConnectionSource;
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.internal.connection.Connection;
-import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBoolean;
 import org.bson.BsonDocument;
@@ -40,10 +39,10 @@
 import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.internal.mockito.MongoMockito.mock;
 import static java.util.Collections.emptyList;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.mockito.ArgumentCaptor.forClass;
@@ -68,13 +67,11 @@ void executedCommandIsCorrect() {
         boolean nameOnly = true;
         boolean authorizedCollections = true;
         int batchSize = 123;
-        long maxTime = 1234;
         BsonValue comment = new BsonString("comment");
         operation.filter(filter)
                 .nameOnly(nameOnly)
                 .authorizedCollections(authorizedCollections)
                 .batchSize(batchSize)
-                .maxTime(maxTime, MILLISECONDS)
                 .comment(comment);
         assertEquals(
                 new BsonDocument()
@@ -85,7 +82,6 @@ void executedCommandIsCorrect() {
                         .append("cursor", new BsonDocument()
                                 .append("batchSize", new BsonInt32(batchSize))
                         )
-                        .append("maxTimeMS", new BsonInt64(maxTime))
                         .append("comment", comment),
                 executeOperationAndCaptureCommand()
         );
@@ -112,9 +108,9 @@ private BsonDocument executeOperationAndCaptureCommand() {
     private static Mocks mocks(final MongoNamespace namespace) {
         Mocks result = new Mocks();
         result.readBinding(mock(ReadBinding.class, bindingMock -> {
-            OperationContext operationContext = new OperationContext();
-            when(bindingMock.getOperationContext()).thenReturn(operationContext);
+            when(bindingMock.getOperationContext()).thenReturn(OPERATION_CONTEXT);
             ConnectionSource connectionSource = mock(ConnectionSource.class, connectionSourceMock -> {
+                when(connectionSourceMock.getOperationContext()).thenReturn(OPERATION_CONTEXT);
                 when(connectionSourceMock.release()).thenReturn(1);
                 ServerAddress serverAddress = new ServerAddress();
                 result.connection(mock(Connection.class, connectionMock -> {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy
index ff664a594ea..fd9786e8dbf 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy
@@ -32,6 +32,7 @@ import org.bson.BsonArray
 import org.bson.BsonDocument
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.WriteConcern.ACKNOWLEDGED
 import static com.mongodb.WriteConcern.UNACKNOWLEDGED
 import static com.mongodb.connection.ServerConnectionState.CONNECTED
@@ -107,8 +108,8 @@ class OperationHelperSpecification extends Specification {
         }
 
         expect:
-        canRetryRead(retryableServerDescription, noTransactionSessionContext)
-        !canRetryRead(retryableServerDescription, activeTransactionSessionContext)
+        canRetryRead(retryableServerDescription, OPERATION_CONTEXT.withSessionContext(noTransactionSessionContext))
+        !canRetryRead(retryableServerDescription, OPERATION_CONTEXT.withSessionContext(activeTransactionSessionContext))
     }
 
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy
index 01ad72455fb..11710eff7df 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy
@@ -41,6 +41,8 @@ import spock.lang.Specification
 
 import java.util.concurrent.TimeUnit
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
 class OperationUnitSpecification extends Specification {
 
     // Have to add to this map for every server release
@@ -95,6 +97,12 @@ class OperationUnitSpecification extends Specification {
     def testSyncOperation(operation, List<Integer> serverVersion, result, Boolean checkCommand=true,
                           BsonDocument expectedCommand=null,
                           Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) {
+        def operationContext = OPERATION_CONTEXT
+                .withSessionContext(Stub(SessionContext) {
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.DEFAULT
+                })
+
         def connection = Mock(Connection) {
             _ * getDescription() >> Stub(ConnectionDescription) {
                 getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion)
@@ -104,20 +112,16 @@ class OperationUnitSpecification extends Specification {
         def connectionSource = Stub(ConnectionSource) {
             getConnection() >> connection
             getReadPreference() >> readPreference
-            getServerApi() >> null
+            getOperationContext() >> operationContext
         }
         def readBinding = Stub(ReadBinding) {
             getReadConnectionSource() >> connectionSource
             getReadPreference() >> readPreference
-            getServerApi() >> null
-            getSessionContext() >> Stub(SessionContext) {
-                hasActiveTransaction() >> false
-                getReadConcern() >> ReadConcern.DEFAULT
-            }
+            getOperationContext() >> operationContext
         }
         def writeBinding = Stub(WriteBinding) {
-            getServerApi() >> null
             getWriteConnectionSource() >> connectionSource
+            getOperationContext() >> operationContext
         }
 
         if (checkCommand) {
@@ -149,6 +153,13 @@ class OperationUnitSpecification extends Specification {
     def testAsyncOperation(operation, List<Integer> serverVersion, result = null,
                            Boolean checkCommand=true, BsonDocument expectedCommand=null,
                            Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) {
+
+        def operationContext = OPERATION_CONTEXT
+                .withSessionContext(Stub(SessionContext) {
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.DEFAULT
+                })
+
         def connection = Mock(AsyncConnection) {
             _ * getDescription() >> Stub(ConnectionDescription) {
                 getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion)
@@ -156,22 +167,18 @@ class OperationUnitSpecification extends Specification {
         }
 
         def connectionSource = Stub(AsyncConnectionSource) {
-            getServerApi() >> null
-            getReadPreference() >> readPreference
             getConnection(_) >> { it[0].onResult(connection, null) }
+            getReadPreference() >> readPreference
+            getOperationContext() >> operationContext
         }
         def readBinding = Stub(AsyncReadBinding) {
-            getServerApi() >> null
             getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
             getReadPreference() >> readPreference
-            getSessionContext() >> Stub(SessionContext) {
-                hasActiveTransaction() >> false
-                getReadConcern() >> ReadConcern.DEFAULT
-            }
+            getOperationContext() >> operationContext
         }
         def writeBinding = Stub(AsyncWriteBinding) {
-            getServerApi() >> null
             getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
+            getOperationContext() >> operationContext
         }
         def callback = new FutureResultCallback()
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy
index a18148911bf..ab6b6e252ab 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy
@@ -16,7 +16,6 @@
 
 package com.mongodb.internal.operation
 
-
 import com.mongodb.MongoWriteConcernException
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
@@ -35,6 +34,7 @@ import org.bson.codecs.BsonDocumentCodec
 import org.bson.codecs.Decoder
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion
 import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer
@@ -53,12 +53,12 @@ class SyncOperationHelperSpecification extends Specification {
         def connection = Mock(Connection)
         def function = Stub(CommandWriteTransformer)
         def connectionSource = Stub(ConnectionSource) {
-            getServerApi() >> null
             getConnection() >> connection
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def writeBinding = Stub(WriteBinding) {
-            getServerApi() >> null
             getWriteConnectionSource() >> connectionSource
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def connectionDescription = Stub(ConnectionDescription)
 
@@ -67,15 +67,21 @@ class SyncOperationHelperSpecification extends Specification {
 
         then:
         _ * connection.getDescription() >> connectionDescription
-        1 * connection.command(dbName, command, _, primary(), decoder, writeBinding) >> new BsonDocument()
+        1 * connection.command(dbName, command, _, primary(), decoder, OPERATION_CONTEXT) >> new BsonDocument()
         1 * connection.release()
     }
 
     def 'should retry with retryable exception'() {
         given:
+        def operationContext = OPERATION_CONTEXT
+                .withSessionContext(Stub(SessionContext) {
+                    hasSession() >> true
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.DEFAULT
+                })
         def dbName = 'db'
         def command = BsonDocument.parse('''{findAndModify: "coll", query: {a: 1}, new: false, update: {$inc: {a :1}}, txnNumber: 1}''')
-        def commandCreator = { serverDescription, connectionDescription -> command }
+        def commandCreator = { csot, serverDescription, connectionDescription -> command }
         def decoder = new BsonDocumentCodec()
         def results = [
             BsonDocument.parse('{ok: 1.0, writeConcernError: {code: 91, errmsg: "Replication is being shut down"}}'),
@@ -92,23 +98,20 @@ class SyncOperationHelperSpecification extends Specification {
             _ * getServerDescription() >> Stub(ServerDescription) {
                 getLogicalSessionTimeoutMinutes() >> 1
             }
+            getOperationContext() >> operationContext
         }
         def writeBinding = Stub(WriteBinding) {
             getWriteConnectionSource() >> connectionSource
-            getServerApi() >> null
-            getSessionContext() >> Stub(SessionContext) {
-                hasSession() >> true
-                hasActiveTransaction() >> false
-                getReadConcern() >> ReadConcern.DEFAULT
-            }
+            getOperationContext() >> operationContext
         }
 
         when:
-        executeRetryableWrite(writeBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder, commandCreator,
-                FindAndModifyHelper.transformer()) { cmd -> cmd }
+        executeRetryableWrite(writeBinding, dbName, primary(),
+                new NoOpFieldNameValidator(), decoder, commandCreator, FindAndModifyHelper.transformer())
+                { cmd -> cmd }
 
         then:
-        2 * connection.command(dbName, command, _, primary(), decoder, writeBinding) >> { results.poll() }
+        2 * connection.command(dbName, command, _, primary(), decoder, operationContext) >> { results.poll() }
 
         then:
         def ex = thrown(MongoWriteConcernException)
@@ -119,17 +122,18 @@ class SyncOperationHelperSpecification extends Specification {
         given:
         def dbName = 'db'
         def command = new BsonDocument('fakeCommandName', BsonNull.VALUE)
-        def commandCreator = { serverDescription, connectionDescription -> command }
+        def commandCreator = { csot, serverDescription, connectionDescription -> command }
         def decoder = Stub(Decoder)
         def function = Stub(CommandReadTransformer)
         def connection = Mock(Connection)
         def connectionSource = Stub(ConnectionSource) {
             getConnection() >> connection
             getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def readBinding = Stub(ReadBinding) {
             getReadConnectionSource() >> connectionSource
-            getServerApi() >> null
+            getOperationContext() >> OPERATION_CONTEXT
         }
         def connectionDescription = Stub(ConnectionDescription)
 
@@ -138,7 +142,7 @@ class SyncOperationHelperSpecification extends Specification {
 
         then:
         _ * connection.getDescription() >> connectionDescription
-        1 * connection.command(dbName, command, _, readPreference, decoder, readBinding) >> new BsonDocument()
+        1 * connection.command(dbName, command, _, readPreference, decoder, OPERATION_CONTEXT) >> new BsonDocument()
         1 * connection.release()
 
         where:
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java
new file mode 100644
index 00000000000..2c7b71949c8
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation;
+
+import com.mongodb.WriteConcern;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.assertions.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+class WriteConcernHelperTest {
+
+    static WriteConcern[] shouldRemoveWtimeout(){
+        return new WriteConcern[]{
+                WriteConcern.ACKNOWLEDGED,
+                WriteConcern.MAJORITY,
+                WriteConcern.W1,
+                WriteConcern.W2,
+                WriteConcern.W3,
+                WriteConcern.UNACKNOWLEDGED,
+                WriteConcern.JOURNALED,
+
+                WriteConcern.ACKNOWLEDGED.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.MAJORITY.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.W1.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.W2.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.W3.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.UNACKNOWLEDGED.withWTimeout(100, TimeUnit.MILLISECONDS),
+                WriteConcern.JOURNALED.withWTimeout(100, TimeUnit.MILLISECONDS),
+        };
+    }
+
+    @MethodSource
+    @ParameterizedTest
+    void shouldRemoveWtimeout(final WriteConcern writeConcern){
+        //when
+        WriteConcern clonedWithoutTimeout = WriteConcernHelper.cloneWithoutTimeout(writeConcern);
+
+        //then
+        assertEquals(writeConcern.getWObject(), clonedWithoutTimeout.getWObject());
+        assertEquals(writeConcern.getJournal(), clonedWithoutTimeout.getJournal());
+        assertNull(clonedWithoutTimeout.getWTimeout(TimeUnit.MILLISECONDS));
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java b/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java
index 6de3150ad36..c7fc1d73e20 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java
@@ -20,6 +20,7 @@
 import com.mongodb.session.ClientSession;
 import org.junit.jupiter.api.Test;
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
 import static com.mongodb.ClusterFixture.getCluster;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
@@ -27,7 +28,7 @@ class BaseClientSessionImplTest {
 
     @Test
     void shouldNotCheckoutServerSessionIfNeverRequested() {
-        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), null);
+        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), OPERATION_CONTEXT);
         ClientSession clientSession = new BaseClientSessionImpl(serverSessionPool, new Object(), ClientSessionOptions.builder().build());
 
         assertEquals(0, serverSessionPool.getInUseCount());
@@ -39,7 +40,7 @@ void shouldNotCheckoutServerSessionIfNeverRequested() {
 
     @Test
     void shouldDelayServerSessionCheckoutUntilRequested() {
-        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), null);
+        ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), OPERATION_CONTEXT);
         ClientSession clientSession = new BaseClientSessionImpl(serverSessionPool, new Object(), ClientSessionOptions.builder().build());
 
         assertEquals(0, serverSessionPool.getInUseCount());
diff --git a/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy
index a1452d4f7a5..19bfa994200 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy
+++ b/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy
@@ -32,6 +32,8 @@ import org.bson.BsonDocument
 import org.bson.codecs.BsonDocumentCodec
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.ClusterFixture.getServerApi
 import static com.mongodb.ReadPreference.primaryPreferred
 import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
@@ -69,7 +71,7 @@ class ServerSessionPoolSpecification extends Specification {
         def cluster = Stub(Cluster) {
             getCurrentDescription() >> connectedDescription
         }
-        def pool = new ServerSessionPool(cluster, getServerApi())
+        def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi())
 
         when:
         def session = pool.get()
@@ -83,7 +85,7 @@ class ServerSessionPoolSpecification extends Specification {
         def cluster = Stub(Cluster) {
             getCurrentDescription() >> connectedDescription
         }
-        def pool = new ServerSessionPool(cluster, getServerApi())
+        def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi())
         pool.close()
 
         when:
@@ -98,7 +100,7 @@ class ServerSessionPoolSpecification extends Specification {
         def cluster = Stub(Cluster) {
             getCurrentDescription() >> connectedDescription
         }
-        def pool = new ServerSessionPool(cluster, getServerApi())
+        def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi())
         def session = pool.get()
 
         when:
@@ -118,7 +120,7 @@ class ServerSessionPoolSpecification extends Specification {
             millis() >>> [0, MINUTES.toMillis(29) + 1,
             ]
         }
-        def pool = new ServerSessionPool(cluster, getServerApi(), clock)
+        def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock)
         def sessionOne = pool.get()
 
         when:
@@ -144,7 +146,7 @@ class ServerSessionPoolSpecification extends Specification {
         def clock = Stub(ServerSessionPool.Clock) {
             millis() >>> [0, 0, 0]
         }
-        def pool = new ServerSessionPool(cluster, getServerApi(), clock)
+        def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock)
         def session = pool.get()
 
         when:
@@ -163,7 +165,7 @@ class ServerSessionPoolSpecification extends Specification {
         def clock = Stub(ServerSessionPool.Clock) {
             millis() >> 42
         }
-        def pool = new ServerSessionPool(cluster, getServerApi(), clock)
+        def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock)
 
         when:
         def session = pool.get() as ServerSessionPool.ServerSessionImpl
@@ -185,7 +187,7 @@ class ServerSessionPoolSpecification extends Specification {
         def clock = Stub(ServerSessionPool.Clock) {
             millis() >> 42
         }
-        def pool = new ServerSessionPool(cluster, getServerApi(), clock)
+        def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock)
 
         when:
         def session = pool.get() as ServerSessionPool.ServerSessionImpl
@@ -205,7 +207,7 @@ class ServerSessionPoolSpecification extends Specification {
         def cluster = Mock(Cluster) {
             getCurrentDescription() >> connectedDescription
         }
-        def pool = new ServerSessionPool(cluster, getServerApi())
+        def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi())
         def sessions = []
         10.times { sessions.add(pool.get()) }
 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java b/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java
index 4f331d208a2..a1b3f37dd98 100644
--- a/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java
+++ b/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java
@@ -15,23 +15,118 @@
  */
 package com.mongodb.internal.time;
 
+import com.mongodb.lang.Nullable;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
+import org.junit.jupiter.params.provider.ValueSource;
 
 import java.time.Duration;
 import java.util.Collection;
+import java.util.Objects;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
 import java.util.stream.Stream;
 
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED;
 import static java.util.Arrays.asList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.MINUTES;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.junit.jupiter.api.Assertions.assertAll;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.params.provider.Arguments.arguments;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+public final class TimePointTest {
+
+    private final AtomicLong currentNanos = new AtomicLong();
+    private final TimePoint mockTimePoint = new TimePoint(0L) {
+        @Override
+        long currentNanos() {
+            return currentNanos.get();
+        }
+    };
+
+    public static boolean isInfinite(final Timeout timeout) {
+        return timeout.call(NANOSECONDS, () -> true, (ns) -> false, () -> false);
+    }
+
+    public static boolean hasExpired(final Timeout timeout) {
+        return timeout.call(NANOSECONDS, () -> false, (ns) -> false, () -> true);
+    }
+
+    public static long remaining(final Timeout timeout, final TimeUnit unit) {
+        return timeout.checkedCall(unit,
+                () -> {
+                    throw new AssertionError("Infinite TimePoints have infinite remaining time");
+                },
+                (time) -> time,
+                () -> 0L);
+    }
+
+    // Timeout
+
+    @Test
+    void timeoutExpiresIn() {
+        assertAll(
+                () -> assertThrows(AssertionError.class, () -> Timeout.expiresIn(-1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)),
+                () -> assertTrue(hasExpired(Timeout.expiresIn(0L, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED))),
+                () -> assertFalse(isInfinite(Timeout.expiresIn(1L, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED))),
+                () -> assertFalse(hasExpired(Timeout.expiresIn(1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED))));
+    }
+
+    @Test
+    void timeoutInfinite() {
+        assertEquals(Timeout.infinite(), TimePoint.infinite());
+    }
+
+    @Test
+    void timeoutAwaitOnCondition() throws InterruptedException {
+        Condition condition = mock(Condition.class);
+
+        Timeout.infinite().awaitOn(condition, () -> "ignored");
+        verify(condition, times(1)).await();
+        verifyNoMoreInteractions(condition);
+
+        reset(condition);
+
+        Timeout.expiresIn(100, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED).awaitOn(condition, () -> "ignored");
+        verify(condition, times(1)).awaitNanos(anyLong());
+        verifyNoMoreInteractions(condition);
+    }
+
+    @Test
+    void timeoutAwaitOnLatch() throws InterruptedException {
+        CountDownLatch latch = mock(CountDownLatch.class);
+
+        Timeout.infinite().awaitOn(latch, () -> "ignored");
+        verify(latch, times(1)).await();
+        verifyNoMoreInteractions(latch);
+
+        reset(latch);
+
+        Timeout.expiresIn(100, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED).awaitOn(latch, () -> "ignored");
+        verify(latch, times(1)).await(anyLong(), any(TimeUnit.class));
+        verifyNoMoreInteractions(latch);
+    }
+
+    // TimePoint
 
-final class TimePointTest {
     @Test
     void now() {
         TimePoint timePointLowerBound = TimePoint.at(System.nanoTime());
@@ -41,6 +136,65 @@ void now() {
         assertTrue(timePoint.compareTo(timePointUpperBound) <= 0, "the point is too late");
     }
 
+    @Test
+    void infinite() {
+        TimePoint infinite = TimePoint.infinite();
+        TimePoint now = TimePoint.now();
+        assertEquals(0, infinite.compareTo(TimePoint.infinite()));
+        assertTrue(infinite.compareTo(now) > 0);
+        assertTrue(now.compareTo(infinite) < 0);
+    }
+
+    @Test
+    void isInfinite() {
+        assertAll(
+                () -> assertTrue(isInfinite(Timeout.infinite())),
+                () -> assertFalse(isInfinite(TimePoint.now())));
+    }
+
+    @Test
+    void asTimeout() {
+        TimePoint t1 = TimePoint.now();
+        assertSame(t1, t1.asTimeout());
+        TimePoint t2 = TimePoint.infinite();
+        assertSame(t2, t2.asTimeout());
+    }
+
+
+    @Test
+    void remaining() {
+        assertAll(
+                () -> assertThrows(AssertionError.class, () -> remaining(TimePoint.infinite(), NANOSECONDS)),
+                () -> assertEquals(0, remaining(TimePoint.now(), NANOSECONDS))
+        );
+        Timeout earlier = TimePoint.at(System.nanoTime() - 100);
+        assertEquals(0, remaining(earlier, NANOSECONDS));
+        assertTrue(hasExpired(earlier));
+
+        currentNanos.set(-100);
+        assertEquals(100, remaining(mockTimePoint, NANOSECONDS));
+        currentNanos.set(-1000000);
+        assertEquals(1, remaining(mockTimePoint, MILLISECONDS));
+        currentNanos.set(-1000000 + 1);
+        assertEquals(0, remaining(mockTimePoint, MILLISECONDS));
+    }
+
+    @ParameterizedTest
+    @ValueSource(longs = {1, 7, 10, 100, 1000})
+    void remaining(final long durationNanos) {
+        TimePoint start = TimePoint.now();
+        Timeout timeout = start.timeoutAfterOrInfiniteIfNegative(durationNanos, NANOSECONDS);
+        while (!hasExpired(timeout)) {
+            long remainingNanosUpperBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos());
+            long remainingNanos = remaining(timeout, NANOSECONDS);
+            long remainingNanosLowerBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos());
+            assertTrue(remainingNanos >= remainingNanosLowerBound, "remaining nanos is too low");
+            assertTrue(remainingNanos <= remainingNanosUpperBound, "remaining nanos is too high");
+            Thread.yield();
+        }
+        assertTrue(TimePoint.now().durationSince(start).toNanos() >= durationNanos, "expired too early");
+    }
+
     @Test
     void elapsed() {
         TimePoint timePoint = TimePoint.now();
@@ -49,25 +203,88 @@ void elapsed() {
         Duration elapsedUpperBound = TimePoint.now().durationSince(timePoint);
         assertTrue(elapsed.compareTo(elapsedLowerBound) >= 0, "the elapsed is too low");
         assertTrue(elapsed.compareTo(elapsedUpperBound) <= 0, "the elapsed is too high");
+        assertThrows(AssertionError.class, () -> TimePoint.infinite().elapsed());
+
+        currentNanos.set(100);
+        assertEquals(100, mockTimePoint.elapsed().toNanos());
+        currentNanos.set(1000000);
+        assertEquals(1, mockTimePoint.elapsed().toMillis());
+        currentNanos.set(1000000 - 1);
+        assertEquals(0, mockTimePoint.elapsed().toMillis());
+    }
+
+    @Test
+    void hasExpired() {
+        assertAll(
+                () -> assertFalse(hasExpired(Timeout.infinite())),
+                () -> assertTrue(hasExpired(TimePoint.now())),
+                () -> assertThrows(AssertionError.class, () -> Timeout.expiresIn(-1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)),
+                () -> assertFalse(hasExpired(Timeout.expiresIn(1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED))));
     }
 
     @ParameterizedTest
     @MethodSource("earlierNanosAndNanosArguments")
-    void durationSince(final long earlierNanos, final long nanos) {
-        Duration expectedDuration = Duration.ofNanos(nanos - earlierNanos);
+    void durationSince(final Long earlierNanos, @Nullable final Long nanos) {
         TimePoint earlierTimePoint = TimePoint.at(earlierNanos);
         TimePoint timePoint = TimePoint.at(nanos);
+
+        if (nanos == null) {
+            assertThrows(AssertionError.class, () -> timePoint.durationSince(earlierTimePoint));
+            return;
+        }
+
+        Duration expectedDuration = Duration.ofNanos(nanos - earlierNanos);
         assertFalse(expectedDuration.isNegative());
         assertEquals(expectedDuration, timePoint.durationSince(earlierTimePoint));
         assertEquals(expectedDuration.negated(), earlierTimePoint.durationSince(timePoint));
     }
 
+    @ParameterizedTest
+    @ValueSource(longs = {1, 7, Long.MAX_VALUE / 2, Long.MAX_VALUE - 1})
+    void remainingNanos(final long durationNanos) {
+        TimePoint start = TimePoint.now();
+        TimePoint timeout = start.add(Duration.ofNanos(durationNanos));
+        assertEquals(durationNanos, timeout.durationSince(start).toNanos());
+        assertEquals(Math.max(0, durationNanos - 1), timeout.durationSince(start.add(Duration.ofNanos(1))).toNanos());
+        assertEquals(0, timeout.durationSince(start.add(Duration.ofNanos(durationNanos))).toNanos());
+        assertEquals(-1, timeout.durationSince(start.add(Duration.ofNanos(durationNanos + 1))).toNanos());
+    }
+
+    @Test
+    void fromNowOrInfinite() {
+        TimePoint timePoint = TimePoint.now();
+        assertAll(
+                () -> assertFalse(isInfinite(TimePoint.now().timeoutAfterOrInfiniteIfNegative(1L, NANOSECONDS))),
+                () -> assertEquals(timePoint, timePoint.timeoutAfterOrInfiniteIfNegative(0, NANOSECONDS)),
+                () -> assertNotEquals(TimePoint.infinite(), timePoint.timeoutAfterOrInfiniteIfNegative(1, NANOSECONDS)),
+                () -> assertNotEquals(timePoint, timePoint.timeoutAfterOrInfiniteIfNegative(1, NANOSECONDS)),
+                () -> assertNotEquals(TimePoint.infinite(), timePoint.timeoutAfterOrInfiniteIfNegative(Long.MAX_VALUE - 1, NANOSECONDS)));
+    }
+
+    @ParameterizedTest
+    @MethodSource("nanosAndDurationsArguments")
+    void add(final long nanos, final Duration duration) {
+        TimePoint timePoint = TimePoint.at(nanos);
+        assertEquals(duration, timePoint.add(duration).durationSince(timePoint));
+    }
+
+    private static Stream<Arguments> nanosAndDurationsArguments() {
+        Collection<Long> nanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        Collection<Long> durationsInNanos = asList(
+                // Using `-Long.MAX_VALUE` results in `ArithmeticException` in OpenJDK JDK 8 because of https://bugs.openjdk.org/browse/JDK-8146747.
+                // This was fixed in OpenJDK JDK 9.
+                -Long.MAX_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        return nanos.stream()
+                .flatMap(nano -> durationsInNanos.stream()
+                        .map(durationNanos -> arguments(nano, Duration.ofNanos(durationNanos))));
+    }
+
     @ParameterizedTest
     @MethodSource("earlierNanosAndNanosArguments")
-    void compareTo(final long earlierNanos, final long nanos) {
+    void compareTo(final Long earlierNanos, final Long nanos) {
         TimePoint earlierTimePoint = TimePoint.at(earlierNanos);
         TimePoint timePoint = TimePoint.at(nanos);
-        if (earlierNanos == nanos) {
+        if (Objects.equals(earlierNanos, nanos)) {
             assertEquals(0, earlierTimePoint.compareTo(timePoint));
             assertEquals(0, timePoint.compareTo(earlierTimePoint));
             assertEquals(earlierTimePoint, timePoint);
@@ -82,28 +299,30 @@ void compareTo(final long earlierNanos, final long nanos) {
 
     private static Stream<Arguments> earlierNanosAndNanosArguments() {
         Collection<Long> earlierNanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
-        Collection<Long> durationsInNanos = asList(0L, 1L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        Collection<Long> durationsInNanos = asList(0L, 1L, Long.MAX_VALUE / 2, Long.MAX_VALUE, null);
         return earlierNanos.stream()
                 .flatMap(earlier -> durationsInNanos.stream()
-                        .map(durationNanos -> arguments(earlier, earlier + durationNanos)));
+                        .map(durationNanos -> arguments(earlier, durationNanos == null ? null : earlier + durationNanos)));
     }
 
     @ParameterizedTest
-    @MethodSource("nanosAndDurationsArguments")
-    void add(final long nanos, final Duration duration) {
-        TimePoint timePoint = TimePoint.at(nanos);
-        assertEquals(duration, timePoint.add(duration).durationSince(timePoint));
+    @MethodSource("durationArguments")
+    void convertsUnits(final long duration, final TimeUnit unit) {
+        TimePoint start = TimePoint.now();
+        TimePoint end = start.timeoutAfterOrInfiniteIfNegative(duration, unit);
+        if (duration < 0) {
+            assertTrue(isInfinite(end));
+        } else {
+            assertEquals(unit.toNanos(duration), end.durationSince(start).toNanos());
+        }
     }
 
-    private static Stream<Arguments> nanosAndDurationsArguments() {
-        Collection<Long> nanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
-        Collection<Long> durationsInNanos = asList(
-                // Using `-Long.MAX_VALUE` results in `ArithmeticException` in OpenJDK JDK 8 because of https://bugs.openjdk.org/browse/JDK-8146747.
-                // This was fixed in OpenJDK JDK 9.
-                -Long.MAX_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
-        return nanos.stream()
-                .flatMap(nano -> durationsInNanos.stream()
-                        .map(durationNanos -> arguments(nano, Duration.ofNanos(durationNanos))));
+    private static Stream<Arguments> durationArguments() {
+        return Stream.of(TimeUnit.values())
+                .flatMap(unit -> Stream.of(
+                        Arguments.of(-7, unit),
+                        Arguments.of(0, unit),
+                        Arguments.of(7, unit)));
     }
 
     private TimePointTest() {
diff --git a/driver-core/src/test/unit/com/mongodb/internal/time/TimeoutTest.java b/driver-core/src/test/unit/com/mongodb/internal/time/TimeoutTest.java
deleted file mode 100644
index 03df92771ac..00000000000
--- a/driver-core/src/test/unit/com/mongodb/internal/time/TimeoutTest.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright 2008-present MongoDB, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.mongodb.internal.time;
-
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.Arguments;
-import org.junit.jupiter.params.provider.MethodSource;
-import org.junit.jupiter.params.provider.ValueSource;
-
-import java.time.Duration;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Stream;
-
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
-import static org.junit.jupiter.api.Assertions.assertAll;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-final class TimeoutTest {
-    @Test
-    void isInfinite() {
-        assertAll(
-                () -> assertTrue(Timeout.infinite().isInfinite()),
-                () -> assertFalse(Timeout.immediate().isInfinite()),
-                () -> assertFalse(Timeout.startNow(1).isInfinite()),
-                () -> assertFalse(Timeout.started(1, TimePoint.now()).isInfinite()));
-    }
-
-    @Test
-    void isImmediate() {
-        assertAll(
-                () -> assertTrue(Timeout.immediate().isImmediate()),
-                () -> assertFalse(Timeout.infinite().isImmediate()),
-                () -> assertFalse(Timeout.startNow(1).isImmediate()),
-                () -> assertFalse(Timeout.started(1, TimePoint.now()).isImmediate()));
-    }
-
-    @Test
-    void started() {
-        TimePoint timePoint = TimePoint.now();
-        assertAll(
-                () -> assertEquals(Timeout.infinite(), Timeout.started(-1, timePoint)),
-                () -> assertEquals(Timeout.immediate(), Timeout.started(0, timePoint)),
-                () -> assertNotEquals(Timeout.infinite(), Timeout.started(1, timePoint)),
-                () -> assertNotEquals(Timeout.immediate(), Timeout.started(1, timePoint)),
-                () -> assertEquals(1, Timeout.started(1, timePoint).durationNanos()),
-                () -> assertEquals(timePoint, Timeout.started(1, timePoint).start()),
-                () -> assertNotEquals(Timeout.infinite(), Timeout.started(Long.MAX_VALUE - 1, timePoint)),
-                () -> assertEquals(Long.MAX_VALUE - 1, Timeout.started(Long.MAX_VALUE - 1, timePoint).durationNanos()),
-                () -> assertEquals(timePoint, Timeout.started(Long.MAX_VALUE - 1, timePoint).start()),
-                () -> assertEquals(Timeout.infinite(), Timeout.started(Long.MAX_VALUE, timePoint)));
-    }
-
-    @Test
-    void startNow() {
-        assertAll(
-                () -> assertEquals(Timeout.infinite(), Timeout.startNow(-1)),
-                () -> assertEquals(Timeout.immediate(), Timeout.startNow(0)),
-                () -> assertNotEquals(Timeout.infinite(), Timeout.startNow(1)),
-                () -> assertNotEquals(Timeout.immediate(), Timeout.startNow(1)),
-                () -> assertEquals(1, Timeout.startNow(1).durationNanos()),
-                () -> assertNotEquals(Timeout.infinite(), Timeout.startNow(Long.MAX_VALUE - 1)),
-                () -> assertEquals(Long.MAX_VALUE - 1, Timeout.startNow(Long.MAX_VALUE - 1).durationNanos()),
-                () -> assertEquals(Timeout.infinite(), Timeout.startNow(Long.MAX_VALUE)));
-    }
-
-    @ParameterizedTest
-    @MethodSource("durationArguments")
-    void startedConvertsUnits(final long duration, final TimeUnit unit) {
-        TimePoint timePoint = TimePoint.now();
-        if (duration < 0) {
-            assertTrue(Timeout.started(duration, unit, timePoint).isInfinite());
-        } else if (duration == 0) {
-            assertTrue(Timeout.started(duration, unit, timePoint).isImmediate());
-        } else {
-            assertEquals(unit.toNanos(duration), Timeout.started(duration, unit, timePoint).durationNanos());
-        }
-    }
-
-    @ParameterizedTest
-    @MethodSource("durationArguments")
-    void startNowConvertsUnits(final long duration, final TimeUnit unit) {
-        if (duration < 0) {
-            assertTrue(Timeout.startNow(duration, unit).isInfinite());
-        } else if (duration == 0) {
-            assertTrue(Timeout.startNow(duration, unit).isImmediate());
-        } else {
-            assertEquals(unit.toNanos(duration), Timeout.startNow(duration, unit).durationNanos());
-        }
-    }
-
-    private static Stream<Arguments> durationArguments() {
-        return Stream.of(TimeUnit.values())
-                .flatMap(unit -> Stream.of(
-                        Arguments.of(-7, unit),
-                        Arguments.of(0, unit),
-                        Arguments.of(7, unit)));
-    }
-
-    @Test
-    void remainingTrivialCases() {
-        assertAll(
-                () -> assertThrows(AssertionError.class, () -> Timeout.infinite().remaining(NANOSECONDS)),
-                () -> assertTrue(Timeout.infinite().remainingOrInfinite(NANOSECONDS) < 0),
-                () -> assertEquals(0, Timeout.immediate().remaining(NANOSECONDS)),
-                () -> assertEquals(0, Timeout.immediate().remainingOrInfinite(NANOSECONDS)));
-    }
-
-    @ParameterizedTest
-    @ValueSource(longs = {1, 7, Long.MAX_VALUE / 2, Long.MAX_VALUE - 1})
-    void remainingNanos(final long durationNanos) {
-        TimePoint start = TimePoint.now();
-        Timeout timeout = Timeout.started(durationNanos, start);
-        assertEquals(durationNanos, timeout.remainingNanos(start));
-        assertEquals(Math.max(0, durationNanos - 1), timeout.remainingNanos(start.add(Duration.ofNanos(1))));
-        assertEquals(0, timeout.remainingNanos(start.add(Duration.ofNanos(durationNanos))));
-        assertEquals(0, timeout.remainingNanos(start.add(Duration.ofNanos(durationNanos + 1))));
-    }
-
-    @Test
-    void expired() {
-        assertAll(
-                () -> assertFalse(Timeout.infinite().expired()),
-                () -> assertTrue(Timeout.immediate().expired()),
-                () -> assertTrue(Timeout.expired(0)),
-                () -> assertFalse(Timeout.expired(Long.MIN_VALUE)),
-                () -> assertFalse(Timeout.expired(-1)),
-                () -> assertFalse(Timeout.expired(1)),
-                () -> assertFalse(Timeout.expired(Long.MAX_VALUE)));
-    }
-
-    @Test
-    void convertRoundUp() {
-        assertAll(
-                () -> assertEquals(1, Timeout.convertRoundUp(1, NANOSECONDS)),
-                () -> assertEquals(0, Timeout.convertRoundUp(0, TimeUnit.MILLISECONDS)),
-                () -> assertEquals(1, Timeout.convertRoundUp(1, TimeUnit.MILLISECONDS)),
-                () -> assertEquals(1, Timeout.convertRoundUp(999_999, TimeUnit.MILLISECONDS)),
-                () -> assertEquals(1, Timeout.convertRoundUp(1_000_000, TimeUnit.MILLISECONDS)),
-                () -> assertEquals(2, Timeout.convertRoundUp(1_000_001, TimeUnit.MILLISECONDS)),
-                () -> assertEquals(1, Timeout.convertRoundUp(1, TimeUnit.DAYS)));
-    }
-
-    @ParameterizedTest
-    @ValueSource(longs = {1, 7, 10, 100, 1000})
-    void remaining(final long durationNanos) {
-        TimePoint start = TimePoint.now();
-        Timeout timeout = Timeout.started(durationNanos, start);
-        while (!timeout.expired()) {
-            long remainingNanosUpperBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos());
-            long remainingNanos = timeout.remaining(NANOSECONDS);
-            long remainingNanosLowerBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos());
-            assertTrue(remainingNanos >= remainingNanosLowerBound, "remaining nanos is too low");
-            assertTrue(remainingNanos <= remainingNanosUpperBound, "remaining nanos is too high");
-            Thread.yield();
-        }
-        assertTrue(TimePoint.now().durationSince(start).toNanos() >= durationNanos, "expired too early");
-    }
-
-    private TimeoutTest() {
-    }
-}
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt
index e4c3a3eb31a..439a0ccbb29 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt
@@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter
 
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.AggregateIterable as JAggregateIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.coroutine.AggregateFlow
 import java.util.concurrent.TimeUnit
@@ -28,7 +29,6 @@ import org.bson.conversions.Bson
 data class SyncAggregateIterable<T : Any>(val wrapped: AggregateFlow<T>) :
     JAggregateIterable<T>, SyncMongoIterable<T>(wrapped) {
     override fun batchSize(batchSize: Int): SyncAggregateIterable<T> = apply { wrapped.batchSize(batchSize) }
-
     override fun toCollection() = runBlocking { wrapped.toCollection() }
 
     override fun allowDiskUse(allowDiskUse: Boolean?): SyncAggregateIterable<T> = apply {
@@ -59,6 +59,10 @@ data class SyncAggregateIterable<T : Any>(val wrapped: AggregateFlow<T>) :
 
     override fun let(variables: Bson?): SyncAggregateIterable<T> = apply { wrapped.let(variables) }
 
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncAggregateIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+
     override fun explain(): Document = runBlocking { wrapped.explain() }
 
     override fun explain(verbosity: ExplainVerbosity): Document = runBlocking { wrapped.explain(verbosity) }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt
index c29f227d5d6..83ba91df16b 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt
@@ -20,6 +20,7 @@ import com.mongodb.ServerAddress
 import com.mongodb.TransactionOptions
 import com.mongodb.client.ClientSession as JClientSession
 import com.mongodb.client.TransactionBody
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.kotlin.client.coroutine.ClientSession
 import com.mongodb.session.ServerSession
 import kotlinx.coroutines.runBlocking
@@ -86,4 +87,6 @@ class SyncClientSession(internal val wrapped: ClientSession, private val origina
 
     override fun <T : Any> withTransaction(transactionBody: TransactionBody<T>, options: TransactionOptions): T =
         throw UnsupportedOperationException()
+
+    override fun getTimeoutContext(): TimeoutContext? = wrapped.getTimeoutContext()
 }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt
index 4f412c253a0..0fdc879d610 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.coroutine.syncadapter
 
 import com.mongodb.client.DistinctIterable as JDistinctIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.coroutine.DistinctFlow
 import java.util.concurrent.TimeUnit
@@ -32,4 +33,7 @@ data class SyncDistinctIterable<T : Any>(val wrapped: DistinctFlow<T>) :
     override fun collation(collation: Collation?): SyncDistinctIterable<T> = apply { wrapped.collation(collation) }
     override fun comment(comment: String?): SyncDistinctIterable<T> = apply { wrapped.comment(comment) }
     override fun comment(comment: BsonValue?): SyncDistinctIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncDistinctIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt
index b9e3a6665d6..6c500a9cf90 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt
@@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter
 import com.mongodb.CursorType
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.FindIterable as JFindIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.coroutine.FindFlow
 import java.util.concurrent.TimeUnit
@@ -76,6 +77,7 @@ data class SyncFindIterable<T : Any>(val wrapped: FindFlow<T>) : JFindIterable<T
     override fun returnKey(returnKey: Boolean): SyncFindIterable<T> = apply { wrapped.returnKey(returnKey) }
 
     override fun showRecordId(showRecordId: Boolean): SyncFindIterable<T> = apply { wrapped.showRecordId(showRecordId) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncFindIterable<T> = apply { wrapped.timeoutMode(timeoutMode) }
 
     override fun explain(): Document = runBlocking { wrapped.explain() }
 
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt
index 4193e0f04f8..ab1853c756d 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.coroutine.syncadapter
 
 import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.kotlin.client.coroutine.ListCollectionsFlow
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -25,7 +26,6 @@ data class SyncListCollectionsIterable<T : Any>(val wrapped: ListCollectionsFlow
     JListCollectionsIterable<T>, SyncMongoIterable<T>(wrapped) {
 
     override fun batchSize(batchSize: Int): SyncListCollectionsIterable<T> = apply { wrapped.batchSize(batchSize) }
-
     override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionsIterable<T> = apply {
         wrapped.maxTime(maxTime, timeUnit)
     }
@@ -33,4 +33,7 @@ data class SyncListCollectionsIterable<T : Any>(val wrapped: ListCollectionsFlow
     override fun filter(filter: Bson?): SyncListCollectionsIterable<T> = apply { wrapped.filter(filter) }
     override fun comment(comment: String?): SyncListCollectionsIterable<T> = apply { wrapped.comment(comment) }
     override fun comment(comment: BsonValue?): SyncListCollectionsIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListCollectionsIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt
index 3acd5581f1b..4563dfe4a4f 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.coroutine.syncadapter
 
 import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.kotlin.client.coroutine.ListDatabasesFlow
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -41,4 +42,7 @@ data class SyncListDatabasesIterable<T : Any>(val wrapped: ListDatabasesFlow<T>)
     override fun comment(comment: String?): SyncListDatabasesIterable<T> = apply { wrapped.comment(comment) }
 
     override fun comment(comment: BsonValue?): SyncListDatabasesIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListDatabasesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt
index 030b89bb1bf..0e329c7bcdd 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.coroutine.syncadapter
 
 import com.mongodb.client.ListIndexesIterable as JListIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.kotlin.client.coroutine.ListIndexesFlow
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -28,4 +29,7 @@ data class SyncListIndexesIterable<T : Any>(val wrapped: ListIndexesFlow<T>) :
     }
     override fun comment(comment: String?): SyncListIndexesIterable<T> = apply { wrapped.comment(comment) }
     override fun comment(comment: BsonValue?): SyncListIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt
index 62af2fe0c7c..a7df87779df 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt
@@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter
 
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.coroutine.ListSearchIndexesFlow
 import java.util.concurrent.TimeUnit
@@ -45,6 +46,9 @@ internal class SyncListSearchIndexesIterable<T : Any>(val wrapped: ListSearchInd
 
     override fun comment(comment: String?): SyncListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
     override fun comment(comment: BsonValue?): SyncListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
     override fun explain(): Document = runBlocking { wrapped.explain() }
 
     override fun explain(verbosity: ExplainVerbosity): Document = runBlocking { wrapped.explain(verbosity) }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt
index 9aab6ed51a6..8e5fc82455a 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt
@@ -18,6 +18,7 @@
 package com.mongodb.kotlin.client.coroutine.syncadapter
 
 import com.mongodb.client.MapReduceIterable as JMapReduceIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.client.model.MapReduceAction
 import com.mongodb.kotlin.client.coroutine.MapReduceFlow
@@ -57,4 +58,7 @@ data class SyncMapReduceIterable<T : Any>(val wrapped: MapReduceFlow<T>) :
     }
 
     override fun collation(collation: Collation?): SyncMapReduceIterable<T> = apply { wrapped.collation(collation) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncMapReduceIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt
index 9cf01ce186f..bfa48ef1e1c 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt
@@ -15,76 +15,12 @@
  */
 package com.mongodb.kotlin.client.coroutine.syncadapter
 
-import com.mongodb.ClientSessionOptions
-import com.mongodb.client.ChangeStreamIterable
-import com.mongodb.client.ClientSession
-import com.mongodb.client.ListDatabasesIterable
 import com.mongodb.client.MongoClient as JMongoClient
-import com.mongodb.client.MongoDatabase
-import com.mongodb.client.MongoIterable
 import com.mongodb.connection.ClusterDescription
 import com.mongodb.kotlin.client.coroutine.MongoClient
-import kotlinx.coroutines.runBlocking
-import org.bson.Document
-import org.bson.conversions.Bson
 
-data class SyncMongoClient(val wrapped: MongoClient) : JMongoClient {
+internal class SyncMongoClient(override val wrapped: MongoClient) : SyncMongoCluster(wrapped), JMongoClient {
     override fun close(): Unit = wrapped.close()
 
-    override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName))
-
-    override fun startSession(): ClientSession = SyncClientSession(runBlocking { wrapped.startSession() }, this)
-
-    override fun startSession(options: ClientSessionOptions): ClientSession =
-        SyncClientSession(runBlocking { wrapped.startSession(options) }, this)
-
-    override fun listDatabaseNames(): MongoIterable<String> = SyncMongoIterable(wrapped.listDatabaseNames())
-
-    override fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
-        SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped()))
-
-    override fun listDatabases(): ListDatabasesIterable<Document> = SyncListDatabasesIterable(wrapped.listDatabases())
-
-    override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
-        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped()))
-
-    override fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
-        SyncListDatabasesIterable(wrapped.listDatabases(resultClass))
-
-    override fun <T : Any> listDatabases(
-        clientSession: ClientSession,
-        resultClass: Class<T>
-    ): ListDatabasesIterable<T> =
-        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass))
-
-    override fun watch(): ChangeStreamIterable<Document> = SyncChangeStreamIterable(wrapped.watch())
-
-    override fun <T : Any> watch(resultClass: Class<T>): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass))
-
-    override fun watch(pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
-        SyncChangeStreamIterable(wrapped.watch(pipeline))
-
-    override fun <T : Any> watch(pipeline: MutableList<out Bson>, resultClass: Class<T>): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass))
-
-    override fun watch(clientSession: ClientSession): ChangeStreamIterable<Document> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped()))
-
-    override fun <T : Any> watch(clientSession: ClientSession, resultClass: Class<T>): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass))
-
-    override fun watch(clientSession: ClientSession, pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline))
-
-    override fun <T : Any> watch(
-        clientSession: ClientSession,
-        pipeline: MutableList<out Bson>,
-        resultClass: Class<T>
-    ): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass))
-
     override fun getClusterDescription(): ClusterDescription = wrapped.getClusterDescription()
-
-    private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped
 }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt
new file mode 100644
index 00000000000..42313ed2b13
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.client.ChangeStreamIterable
+import com.mongodb.client.ClientSession
+import com.mongodb.client.ListDatabasesIterable
+import com.mongodb.client.MongoCluster as JMongoCluster
+import com.mongodb.client.MongoDatabase
+import com.mongodb.client.MongoIterable
+import com.mongodb.kotlin.client.coroutine.MongoCluster
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.runBlocking
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
+internal open class SyncMongoCluster(open val wrapped: MongoCluster) : JMongoCluster {
+    override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry
+
+    override fun getReadPreference(): ReadPreference = wrapped.readPreference
+
+    override fun getWriteConcern(): WriteConcern = wrapped.writeConcern
+
+    override fun getReadConcern(): ReadConcern = wrapped.readConcern
+
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
+
+    override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry))
+
+    override fun withReadPreference(readPreference: ReadPreference): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withReadPreference(readPreference))
+
+    override fun withReadConcern(readConcern: ReadConcern): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withReadConcern(readConcern))
+
+    override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withWriteConcern(writeConcern))
+
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit))
+
+    override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName))
+
+    override fun startSession(): ClientSession = SyncClientSession(runBlocking { wrapped.startSession() }, this)
+
+    override fun startSession(options: ClientSessionOptions): ClientSession =
+        SyncClientSession(runBlocking { wrapped.startSession(options) }, this)
+
+    override fun listDatabaseNames(): MongoIterable<String> = SyncMongoIterable(wrapped.listDatabaseNames())
+
+    override fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
+        SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped()))
+
+    override fun listDatabases(): ListDatabasesIterable<Document> = SyncListDatabasesIterable(wrapped.listDatabases())
+
+    override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
+        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped()))
+
+    override fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
+        SyncListDatabasesIterable(wrapped.listDatabases(resultClass))
+
+    override fun <T : Any> listDatabases(
+        clientSession: ClientSession,
+        resultClass: Class<T>
+    ): ListDatabasesIterable<T> =
+        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass))
+
+    override fun watch(): ChangeStreamIterable<Document> = SyncChangeStreamIterable(wrapped.watch())
+
+    override fun <T : Any> watch(resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass))
+
+    override fun watch(pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline))
+
+    override fun <T : Any> watch(pipeline: MutableList<out Bson>, resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass))
+
+    override fun watch(clientSession: ClientSession): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped()))
+
+    override fun <T : Any> watch(clientSession: ClientSession, resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass))
+
+    override fun watch(clientSession: ClientSession, pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline))
+
+    override fun <T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: MutableList<out Bson>,
+        resultClass: Class<T>
+    ): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass))
+
+    private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped
+}
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt
index 756c884608a..fa26fae86c1 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt
@@ -55,6 +55,7 @@ import com.mongodb.client.result.InsertManyResult
 import com.mongodb.client.result.InsertOneResult
 import com.mongodb.client.result.UpdateResult
 import com.mongodb.kotlin.client.coroutine.MongoCollection
+import java.util.concurrent.TimeUnit
 import kotlinx.coroutines.flow.toCollection
 import kotlinx.coroutines.runBlocking
 import org.bson.Document
@@ -74,6 +75,7 @@ data class SyncMongoCollection<T : Any>(val wrapped: MongoCollection<T>) : JMong
     override fun getWriteConcern(): WriteConcern = wrapped.writeConcern
 
     override fun getReadConcern(): ReadConcern = wrapped.readConcern
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
 
     override fun <R : Any> withDocumentClass(clazz: Class<R>): SyncMongoCollection<R> =
         SyncMongoCollection(wrapped.withDocumentClass(clazz))
@@ -90,6 +92,9 @@ data class SyncMongoCollection<T : Any>(val wrapped: MongoCollection<T>) : JMong
     override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection<T> =
         SyncMongoCollection(wrapped.withReadConcern(readConcern))
 
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection<T> =
+        SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit))
+
     override fun countDocuments(): Long = runBlocking { wrapped.countDocuments() }
 
     override fun countDocuments(filter: Bson): Long = runBlocking { wrapped.countDocuments(filter) }
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt
index ee4c4d23040..ae83a1443b7 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt
@@ -23,6 +23,7 @@ import com.mongodb.client.MongoDatabase as JMongoDatabase
 import com.mongodb.client.model.CreateCollectionOptions
 import com.mongodb.client.model.CreateViewOptions
 import com.mongodb.kotlin.client.coroutine.MongoDatabase
+import java.util.concurrent.TimeUnit
 import kotlinx.coroutines.runBlocking
 import org.bson.Document
 import org.bson.codecs.configuration.CodecRegistry
@@ -39,6 +40,8 @@ data class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase {
 
     override fun getReadConcern(): ReadConcern = wrapped.readConcern
 
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
+
     override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase =
         SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry))
 
@@ -51,6 +54,9 @@ data class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase {
     override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase =
         SyncMongoDatabase(wrapped.withReadConcern(readConcern))
 
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase =
+        SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit))
+
     override fun getCollection(collectionName: String): MongoCollection<Document> =
         SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java))
 
diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt
index e7a22506f0a..98ab0d93b75 100644
--- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt
+++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt
@@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.coroutine.syncadapter
 import com.mongodb.Function
 import com.mongodb.client.MongoCursor
 import com.mongodb.client.MongoIterable as JMongoIterable
+import com.mongodb.client.cursor.TimeoutMode
 import kotlinx.coroutines.flow.Flow
 import kotlinx.coroutines.flow.firstOrNull
 import kotlinx.coroutines.flow.map
@@ -26,6 +27,7 @@ import kotlinx.coroutines.runBlocking
 
 open class SyncMongoIterable<T>(private val delegate: Flow<T>) : JMongoIterable<T> {
     private var batchSize: Int? = null
+    private var timeoutMode: TimeoutMode? = null
 
     override fun iterator(): MongoCursor<T> = cursor()
 
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt
index 683746efc96..c8da59450ad 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt
@@ -16,6 +16,9 @@
 package com.mongodb.kotlin.client.coroutine
 
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.reactivestreams.client.AggregatePublisher
 import java.util.concurrent.TimeUnit
@@ -45,6 +48,19 @@ public class AggregateFlow<T : Any>(private val wrapped: AggregatePublisher<T>)
      */
     public fun batchSize(batchSize: Int): AggregateFlow<T> = apply { wrapped.batchSize(batchSize) }
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): AggregateFlow<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
     /**
      * Aggregates documents according to the specified aggregation pipeline, which must end with a $out or $merge stage.
      *
@@ -167,7 +183,6 @@ public class AggregateFlow<T : Any>(private val wrapped: AggregatePublisher<T>)
     /**
      * Explain the execution plan for this operation with the given verbosity level
      *
-     * @param R the type of the document class
      * @param verbosity the verbosity of the explanation
      * @return the execution plan
      * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt
index 4a214d6282c..55bfeb82060 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt
@@ -39,6 +39,15 @@ import org.bson.BsonValue
  */
 public class ChangeStreamFlow<T : Any>(private val wrapped: ChangeStreamPublisher<T>) : Flow<ChangeStreamDocument<T>> {
 
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     */
+    public fun batchSize(batchSize: Int): ChangeStreamFlow<T> = apply { wrapped.batchSize(batchSize) }
+
     /**
      * Sets the fullDocument value.
      *
@@ -68,15 +77,6 @@ public class ChangeStreamFlow<T : Any>(private val wrapped: ChangeStreamPublishe
      */
     public fun resumeAfter(resumeToken: BsonDocument): ChangeStreamFlow<T> = apply { wrapped.resumeAfter(resumeToken) }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public fun batchSize(batchSize: Int): ChangeStreamFlow<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the maximum await execution time on the server for this operation.
      *
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt
index 6809b0b2777..6c53a1faf47 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt
@@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.coroutine
 import com.mongodb.ClientSessionOptions
 import com.mongodb.ServerAddress
 import com.mongodb.TransactionOptions
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.reactivestreams.client.ClientSession as reactiveClientSession
 import com.mongodb.session.ClientSession as jClientSession
 import com.mongodb.session.ServerSession
@@ -214,6 +215,18 @@ public class ClientSession(public val wrapped: reactiveClientSession) : jClientS
     public suspend fun abortTransaction() {
         wrapped.abortTransaction().awaitFirstOrNull()
     }
+
+    /**
+     * Gets the timeout context to use with this session:
+     * * `MongoClientSettings#getTimeoutMS`
+     * * `ClientSessionOptions#getDefaultTimeout`
+     *
+     * Note: For internal use only
+     *
+     * @return the timeout to use
+     * @since 5.2
+     */
+    public override fun getTimeoutContext(): TimeoutContext? = wrapped.timeoutContext
 }
 
 /**
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt
index 3583e4a2390..c65f7f6301c 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt
@@ -15,6 +15,9 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.reactivestreams.client.DistinctPublisher
 import java.util.concurrent.TimeUnit
@@ -41,6 +44,19 @@ public class DistinctFlow<T : Any>(private val wrapped: DistinctPublisher<T>) :
      */
     public fun batchSize(batchSize: Int): DistinctFlow<T> = apply { wrapped.batchSize(batchSize) }
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): DistinctFlow<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
     /**
      * Sets the query filter to apply to the query.
      *
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt
index 49a391c236f..f0afb4e9937 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt
@@ -17,6 +17,9 @@ package com.mongodb.kotlin.client.coroutine
 
 import com.mongodb.CursorType
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.reactivestreams.client.FindPublisher
 import java.util.concurrent.TimeUnit
@@ -45,6 +48,24 @@ public class FindFlow<T : Any>(private val wrapped: FindPublisher<T>) : Flow<T>
      */
     public fun batchSize(batchSize: Int): FindFlow<T> = apply { wrapped.batchSize(batchSize) }
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * If the `timeout` is set then:
+     * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME]
+     * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and it's an error to configure
+     *   it as: [TimeoutMode.CURSOR_LIFETIME]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): FindFlow<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
     /**
      * Sets the query filter to apply to the query.
      *
@@ -250,7 +271,6 @@ public class FindFlow<T : Any>(private val wrapped: FindPublisher<T>) : Flow<T>
     /**
      * Explain the execution plan for this operation with the given verbosity level
      *
-     * @param R the type of the document class
      * @param verbosity the verbosity of the explanation
      * @return the execution plan
      * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt
index bc205b7073f..a6dfd770e08 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt
@@ -15,6 +15,9 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListCollectionsPublisher
 import java.util.concurrent.TimeUnit
 import kotlinx.coroutines.flow.Flow
@@ -31,6 +34,31 @@ import org.bson.conversions.Bson
  */
 public class ListCollectionsFlow<T : Any>(private val wrapped: ListCollectionsPublisher<T>) :
     Flow<T> by wrapped.asFlow() {
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     */
+    public fun batchSize(batchSize: Int): ListCollectionsFlow<T> = apply { wrapped.batchSize(batchSize) }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListCollectionsFlow<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+
     /**
      * Sets the maximum execution time on the server for this operation.
      *
@@ -43,15 +71,6 @@ public class ListCollectionsFlow<T : Any>(private val wrapped: ListCollectionsPu
         wrapped.maxTime(maxTime, timeUnit)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public fun batchSize(batchSize: Int): ListCollectionsFlow<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the query filter to apply to the returned database names.
      *
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt
index 4b56333bb38..473cde087b6 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt
@@ -15,6 +15,9 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListDatabasesPublisher
 import java.util.concurrent.TimeUnit
 import kotlinx.coroutines.flow.Flow
@@ -30,6 +33,29 @@ import org.bson.conversions.Bson
  * @see [List databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases/)
  */
 public class ListDatabasesFlow<T : Any>(private val wrapped: ListDatabasesPublisher<T>) : Flow<T> by wrapped.asFlow() {
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     */
+    public fun batchSize(batchSize: Int): ListDatabasesFlow<T> = apply { wrapped.batchSize(batchSize) }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListDatabasesFlow<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
     /**
      * Sets the maximum execution time on the server for this operation.
      *
@@ -42,15 +68,6 @@ public class ListDatabasesFlow<T : Any>(private val wrapped: ListDatabasesPublis
         wrapped.maxTime(maxTime, timeUnit)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public fun batchSize(batchSize: Int): ListDatabasesFlow<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the query filter to apply to the returned database names.
      *
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt
index 9e856d28ee3..b92453158a1 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt
@@ -15,6 +15,9 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListIndexesPublisher
 import java.util.concurrent.TimeUnit
 import kotlinx.coroutines.flow.Flow
@@ -29,6 +32,29 @@ import org.bson.BsonValue
  * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
  */
 public class ListIndexesFlow<T : Any>(private val wrapped: ListIndexesPublisher<T>) : Flow<T> by wrapped.asFlow() {
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     */
+    public fun batchSize(batchSize: Int): ListIndexesFlow<T> = apply { wrapped.batchSize(batchSize) }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListIndexesFlow<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
     /**
      * Sets the maximum execution time on the server for this operation.
      *
@@ -41,15 +67,6 @@ public class ListIndexesFlow<T : Any>(private val wrapped: ListIndexesPublisher<
         wrapped.maxTime(maxTime, timeUnit)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public fun batchSize(batchSize: Int): ListIndexesFlow<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the comment for this operation. A null value means no comment is set.
      *
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt
index ce355c69e41..1c7fe4ded5e 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt
@@ -16,6 +16,9 @@
 package com.mongodb.kotlin.client.coroutine
 
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher
 import java.util.concurrent.TimeUnit
@@ -36,6 +39,30 @@ import org.bson.Document
 public class ListSearchIndexesFlow<T : Any>(private val wrapped: ListSearchIndexesPublisher<T>) :
     Flow<T> by wrapped.asFlow() {
 
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     */
+    public fun batchSize(batchSize: Int): ListSearchIndexesFlow<T> = apply { wrapped.batchSize(batchSize) }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesFlow<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+
     /**
      * Sets an Atlas Search index name for this operation.
      *
@@ -55,15 +82,6 @@ public class ListSearchIndexesFlow<T : Any>(private val wrapped: ListSearchIndex
         wrapped.allowDiskUse(allowDiskUse)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size.
-     * @return this.
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public fun batchSize(batchSize: Int): ListSearchIndexesFlow<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the maximum execution time on the server for this operation.
      *
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt
index 1849f9ae92f..407f1b8fe39 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt
@@ -17,6 +17,9 @@
 
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.client.model.MapReduceAction
 import com.mongodb.reactivestreams.client.MapReducePublisher
@@ -37,6 +40,7 @@ import org.bson.conversions.Bson
  */
 @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
 public class MapReduceFlow<T : Any>(private val wrapped: MapReducePublisher<T>) : Flow<T> by wrapped.asFlow() {
+
     /**
      * Sets the number of documents to return per batch.
      *
@@ -46,6 +50,19 @@ public class MapReduceFlow<T : Any>(private val wrapped: MapReducePublisher<T>)
      */
     public fun batchSize(batchSize: Int): MapReduceFlow<T> = apply { wrapped.batchSize(batchSize) }
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): MapReduceFlow<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
     /**
      * Aggregates documents to a collection according to the specified map-reduce function with the given options, which
      * must specify a non-inline result.
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt
index fc97c2e3bb4..c4c2acc27f6 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt
@@ -24,11 +24,7 @@ import com.mongodb.lang.Nullable
 import com.mongodb.reactivestreams.client.MongoClient as JMongoClient
 import com.mongodb.reactivestreams.client.MongoClients as JMongoClients
 import java.io.Closeable
-import kotlinx.coroutines.flow.Flow
-import kotlinx.coroutines.reactive.asFlow
-import kotlinx.coroutines.reactive.awaitSingle
-import org.bson.Document
-import org.bson.conversions.Bson
+import java.util.concurrent.TimeUnit
 
 /**
  * A client-side representation of a MongoDB cluster.
@@ -42,7 +38,7 @@ import org.bson.conversions.Bson
  *
  * @see MongoClient.create
  */
-public class MongoClient(private val wrapped: JMongoClient) : Closeable {
+public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapped), Closeable {
 
     /**
      * A factory for [MongoClient] instances.
@@ -112,176 +108,13 @@ public class MongoClient(private val wrapped: JMongoClient) : Closeable {
      * @see com.mongodb.MongoClientSettings.Builder.applyToClusterSettings
      */
     public fun getClusterDescription(): ClusterDescription = wrapped.clusterDescription
-
-    /**
-     * Gets a [MongoDatabase] instance for the given database name.
-     *
-     * @param databaseName the name of the database to retrievecom.mongodb.connection.
-     * @return a `MongoDatabase` representing the specified database
-     * @throws IllegalArgumentException if databaseName is invalid
-     * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity
-     */
-    public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName))
-
-    /**
-     * Creates a client session.
-     *
-     * Note: A ClientSession instance can not be used concurrently in multiple operations.
-     *
-     * @param options the options for the client session
-     * @return the client session
-     */
-    public suspend fun startSession(
-        options: ClientSessionOptions = ClientSessionOptions.builder().build()
-    ): ClientSession = ClientSession(wrapped.startSession(options).awaitSingle())
-
-    /**
-     * Get a list of the database names
-     *
-     * @return an iterable containing all the names of all the databases
-     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
-     */
-    public fun listDatabaseNames(): Flow<String> = wrapped.listDatabaseNames().asFlow()
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the list databases iterable interface
-     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
-     */
-    public fun listDatabaseNames(clientSession: ClientSession): Flow<String> =
-        wrapped.listDatabaseNames(clientSession.wrapped).asFlow()
-
-    /**
-     * Gets the list of databases
-     *
-     * @return the list databases iterable interface
-     */
-    @JvmName("listDatabasesAsDocument")
-    public fun listDatabases(): ListDatabasesFlow<Document> = listDatabases<Document>()
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the list databases iterable interface
-     */
-    @JvmName("listDatabasesAsDocumentWithSession")
-    public fun listDatabases(clientSession: ClientSession): ListDatabasesFlow<Document> =
-        listDatabases<Document>(clientSession)
-
-    /**
-     * Gets the list of databases
-     *
-     * @param T the type of the class to use
-     * @param resultClass the target document type of the iterable.
-     * @return the list databases iterable interface
-     */
-    public fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesFlow<T> =
-        ListDatabasesFlow(wrapped.listDatabases(resultClass))
-
-    /**
-     * Gets the list of databases
-     *
-     * @param T the type of the class to use
-     * @param clientSession the client session with which to associate this operation
-     * @param resultClass the target document type of the iterable.
-     * @return the list databases iterable interface
-     */
-    public fun <T : Any> listDatabases(clientSession: ClientSession, resultClass: Class<T>): ListDatabasesFlow<T> =
-        ListDatabasesFlow(wrapped.listDatabases(clientSession.wrapped, resultClass))
-
-    /**
-     * Gets the list of databases
-     *
-     * @param T the type of the class to use
-     * @return the list databases iterable interface
-     */
-    public inline fun <reified T : Any> listDatabases(): ListDatabasesFlow<T> = listDatabases(T::class.java)
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param T the type of the class to use
-     * @return the list databases iterable interface
-     */
-    public inline fun <reified T : Any> listDatabases(clientSession: ClientSession): ListDatabasesFlow<T> =
-        listDatabases(clientSession, T::class.java)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    @JvmName("watchAsDocument")
-    public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> = watch<Document>(pipeline)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    @JvmName("watchAsDocumentWithSession")
-    public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> =
-        watch<Document>(clientSession, pipeline)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @param resultClass the target document type of the iterable.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamFlow<T> =
-        ChangeStreamFlow(wrapped.watch(pipeline, resultClass))
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @param resultClass the target document type of the iterable.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public fun <T : Any> watch(
-        clientSession: ClientSession,
-        pipeline: List<Bson> = emptyList(),
-        resultClass: Class<T>
-    ): ChangeStreamFlow<T> = ChangeStreamFlow(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<T> =
-        watch(pipeline, T::class.java)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public inline fun <reified T : Any> watch(
-        clientSession: ClientSession,
-        pipeline: List<Bson> = emptyList()
-    ): ChangeStreamFlow<T> = watch(clientSession, pipeline, T::class.java)
 }
+
+/**
+ * ClientSessionOptions.Builder.defaultTimeout extension function
+ *
+ * @param defaultTimeout time in milliseconds
+ * @return the options
+ */
+public fun ClientSessionOptions.Builder.defaultTimeout(defaultTimeout: Long): ClientSessionOptions.Builder =
+    this.apply { defaultTimeout(defaultTimeout, TimeUnit.MILLISECONDS) }
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt
new file mode 100644
index 00000000000..88df39dd23d
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.reactivestreams.client.MongoCluster as JMongoCluster
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.reactive.asFlow
+import kotlinx.coroutines.reactive.awaitSingle
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
+/**
+ * The client-side representation of a MongoDB cluster operations.
+ *
+ * The originating [MongoClient] is responsible for the closing of resources. If the originator [MongoClient] is closed,
+ * then any operations will fail.
+ *
+ * @see MongoClient
+ * @since 5.2
+ */
+public open class MongoCluster protected constructor(private val wrapped: JMongoCluster) {
+
+    /** The codec registry. */
+    public val codecRegistry: CodecRegistry
+        get() = wrapped.codecRegistry
+
+    /** The read concern. */
+    public val readConcern: ReadConcern
+        get() = wrapped.readConcern
+
+    /** The read preference. */
+    public val readPreference: ReadPreference
+        get() = wrapped.readPreference
+
+    /** The write concern. */
+    public val writeConcern: WriteConcern
+        get() = wrapped.writeConcern
+
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+     * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+     * - `null` means that the timeout mechanism for operations will defer to using:
+     *     - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *       become available
+     *     - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+     *     - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+     *     - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+     *       [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+     *     - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+     * - `0` means infinite timeout.
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @return the optional timeout duration
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
+    /**
+     * Create a new MongoCluster instance with a different codec registry.
+     *
+     * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of
+     * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining
+     * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances
+     * using the factory methods in [org.bson.codecs.configuration.CodecRegistries].
+     *
+     * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database
+     * @return a new MongoCluster instance with the different codec registry
+     * @see org.bson.codecs.configuration.CodecRegistries
+     */
+    public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCluster =
+        MongoCluster(wrapped.withCodecRegistry(newCodecRegistry))
+
+    /**
+     * Create a new MongoCluster instance with a different read preference.
+     *
+     * @param newReadPreference the new [ReadPreference] for the database
+     * @return a new MongoCluster instance with the different readPreference
+     */
+    public fun withReadPreference(newReadPreference: ReadPreference): MongoCluster =
+        MongoCluster(wrapped.withReadPreference(newReadPreference))
+
+    /**
+     * Create a new MongoCluster instance with a different read concern.
+     *
+     * @param newReadConcern the new [ReadConcern] for the database
+     * @return a new MongoCluster instance with the different ReadConcern
+     * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+     */
+    public fun withReadConcern(newReadConcern: ReadConcern): MongoCluster =
+        MongoCluster(wrapped.withReadConcern(newReadConcern))
+
+    /**
+     * Create a new MongoCluster instance with a different write concern.
+     *
+     * @param newWriteConcern the new [WriteConcern] for the database
+     * @return a new MongoCluster instance with the different writeConcern
+     */
+    public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCluster =
+        MongoCluster(wrapped.withWriteConcern(newWriteConcern))
+
+    /**
+     * Create a new MongoCluster instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return a new MongoCluster instance with the set time limit for operations
+     * @see [MongoCluster.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCluster =
+        MongoCluster(wrapped.withTimeout(timeout, timeUnit))
+
+    /**
+     * Gets a [MongoDatabase] instance for the given database name.
+     *
+     * @param databaseName the name of the database to retrieve
+     * @return a `MongoDatabase` representing the specified database
+     * @throws IllegalArgumentException if databaseName is invalid
+     * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity
+     */
+    public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName))
+
+    /**
+     * Creates a client session.
+     *
+     * Note: A ClientSession instance can not be used concurrently in multiple operations.
+     *
+     * @param options the options for the client session
+     * @return the client session
+     */
+    public suspend fun startSession(
+        options: ClientSessionOptions = ClientSessionOptions.builder().build()
+    ): ClientSession = ClientSession(wrapped.startSession(options).awaitSingle())
+
+    /**
+     * Get a list of the database names
+     *
+     * @return an iterable containing all the names of all the databases
+     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+     */
+    public fun listDatabaseNames(): Flow<String> = wrapped.listDatabaseNames().asFlow()
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list databases iterable interface
+     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+     */
+    public fun listDatabaseNames(clientSession: ClientSession): Flow<String> =
+        wrapped.listDatabaseNames(clientSession.wrapped).asFlow()
+
+    /**
+     * Gets the list of databases
+     *
+     * @return the list databases iterable interface
+     */
+    @JvmName("listDatabasesAsDocument")
+    public fun listDatabases(): ListDatabasesFlow<Document> = listDatabases<Document>()
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list databases iterable interface
+     */
+    @JvmName("listDatabasesAsDocumentWithSession")
+    public fun listDatabases(clientSession: ClientSession): ListDatabasesFlow<Document> =
+        listDatabases<Document>(clientSession)
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @param resultClass the target document type of the iterable.
+     * @return the list databases iterable interface
+     */
+    public fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesFlow<T> =
+        ListDatabasesFlow(wrapped.listDatabases(resultClass))
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the target document type of the iterable.
+     * @return the list databases iterable interface
+     */
+    public fun <T : Any> listDatabases(clientSession: ClientSession, resultClass: Class<T>): ListDatabasesFlow<T> =
+        ListDatabasesFlow(wrapped.listDatabases(clientSession.wrapped, resultClass))
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @return the list databases iterable interface
+     */
+    public inline fun <reified T : Any> listDatabases(): ListDatabasesFlow<T> = listDatabases(T::class.java)
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param T the type of the class to use
+     * @return the list databases iterable interface
+     */
+    public inline fun <reified T : Any> listDatabases(clientSession: ClientSession): ListDatabasesFlow<T> =
+        listDatabases(clientSession, T::class.java)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocument")
+    public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> = watch<Document>(pipeline)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocumentWithSession")
+    public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> =
+        watch<Document>(clientSession, pipeline)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamFlow<T> =
+        ChangeStreamFlow(wrapped.watch(pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList(),
+        resultClass: Class<T>
+    ): ChangeStreamFlow<T> = ChangeStreamFlow(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<T> =
+        watch(pipeline, T::class.java)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList()
+    ): ChangeStreamFlow<T> = watch(clientSession, pipeline, T::class.java)
+}
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt
index b1026c359f9..5602b5ecd11 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt
@@ -19,6 +19,8 @@ import com.mongodb.MongoNamespace
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.bulk.BulkWriteResult
 import com.mongodb.client.model.BulkWriteOptions
 import com.mongodb.client.model.CountOptions
@@ -87,6 +89,28 @@ public class MongoCollection<T : Any>(private val wrapped: JMongoCollection<T>)
     public val writeConcern: WriteConcern
         get() = wrapped.writeConcern
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+     * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+     * - `null` means that the timeout mechanism for operations will defer to using:
+     *     - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *       become available
+     *     - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+     *     - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+     *     - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+     *       [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+     *     - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+     * - `0` means infinite timeout.
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @return the optional timeout duration
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
     /**
      * Create a new collection instance with a different default class to cast any documents returned from the database
      * into.
@@ -150,6 +174,21 @@ public class MongoCollection<T : Any>(private val wrapped: JMongoCollection<T>)
     public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection<T> =
         MongoCollection(wrapped.withWriteConcern(newWriteConcern))
 
+    /**
+     * Create a new MongoCollection instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return a new MongoCollection instance with the set time limit for operations
+     * @see [MongoCollection.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection<T> =
+        MongoCollection(wrapped.withTimeout(timeout, timeUnit))
+
     /**
      * Counts the number of documents in the collection.
      *
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt
index bf40401a0a1..007251bab31 100644
--- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt
@@ -18,6 +18,8 @@ package com.mongodb.kotlin.client.coroutine
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.model.CreateCollectionOptions
 import com.mongodb.client.model.CreateViewOptions
 import com.mongodb.reactivestreams.client.MongoDatabase as JMongoDatabase
@@ -55,6 +57,28 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     public val writeConcern: WriteConcern
         get() = wrapped.writeConcern
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+     * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+     * - `null` means that the timeout mechanism for operations will defer to using:
+     *     - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *       become available
+     *     - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+     *     - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+     *     - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+     *       [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+     *     - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+     * - `0` means infinite timeout.
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @return the optional timeout duration
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
     /**
      * Create a new MongoDatabase instance with a different codec registry.
      *
@@ -98,6 +122,21 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase =
         MongoDatabase(wrapped.withWriteConcern(newWriteConcern))
 
+    /**
+     * Create a new MongoDatabase instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return a new MongoDatabase instance with the set time limit for operations
+     * @see [MongoDatabase.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase =
+        MongoDatabase(wrapped.withTimeout(timeout, timeUnit))
+
     /**
      * Gets a collection.
      *
@@ -150,6 +189,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param command the command to be run
      * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
@@ -166,6 +208,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param clientSession the client session with which to associate this operation
      * @param command the command to be run
@@ -184,6 +229,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param command the command to be run
      * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
@@ -198,6 +246,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param clientSession the client session with which to associate this operation
      * @param command the command to be run
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt
index cf8ebaa02cf..07953277d5a 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.coroutine
 
 import com.mongodb.ExplainVerbosity
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.reactivestreams.client.AggregatePublisher
 import java.util.concurrent.TimeUnit
@@ -71,6 +72,7 @@ class AggregateFlowTest {
         flow.maxAwaitTime(1, TimeUnit.SECONDS)
         flow.maxTime(1)
         flow.maxTime(1, TimeUnit.SECONDS)
+        flow.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).allowDiskUse(true)
         verify(wrapped).batchSize(batchSize)
@@ -85,6 +87,7 @@ class AggregateFlowTest {
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
         verify(wrapped).let(bson)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         whenever(wrapped.explain(Document::class.java)).doReturn(Mono.fromCallable { Document() })
         whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(Mono.fromCallable { Document() })
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt
index fa3b25f92dd..571c6f579bb 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt
@@ -15,6 +15,7 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.reactivestreams.client.DistinctPublisher
 import java.util.concurrent.TimeUnit
@@ -55,6 +56,7 @@ class DistinctFlowTest {
         flow.filter(filter)
         flow.maxTime(1)
         flow.maxTime(1, TimeUnit.SECONDS)
+        flow.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).batchSize(batchSize)
         verify(wrapped).collation(collation)
@@ -63,6 +65,7 @@ class DistinctFlowTest {
         verify(wrapped).filter(filter)
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt
index 9243748f1af..ae4f13639eb 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt
@@ -29,6 +29,7 @@ class ExtensionMethodsTest {
                 "CountOptions",
                 "CreateCollectionOptions",
                 "CreateIndexOptions",
+                "ClientSessionOptions",
                 "DropIndexOptions",
                 "EstimatedDocumentCountOptions",
                 "FindOneAndDeleteOptions",
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt
index 2216c044883..450059c8211 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt
@@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.coroutine
 
 import com.mongodb.CursorType
 import com.mongodb.ExplainVerbosity
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.reactivestreams.client.FindPublisher
 import java.util.concurrent.TimeUnit
@@ -78,6 +79,7 @@ class FindFlowTest {
         flow.showRecordId(true)
         flow.skip(1)
         flow.sort(bson)
+        flow.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).allowDiskUse(true)
         verify(wrapped).batchSize(batchSize)
@@ -103,6 +105,7 @@ class FindFlowTest {
         verify(wrapped).showRecordId(true)
         verify(wrapped).skip(1)
         verify(wrapped).sort(bson)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         whenever(wrapped.explain(Document::class.java)).doReturn(Mono.fromCallable { Document() })
         whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(Mono.fromCallable { Document() })
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt
index a84b4990129..c2aa221c98e 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt
@@ -38,6 +38,7 @@ class ListCollectionNamesFlowTest {
     }
 
     @Test
+    @Suppress("DEPRECATION")
     fun shouldCallTheUnderlyingMethods() {
         val wrapped: ListCollectionNamesPublisher = mock()
         val flow = ListCollectionNamesFlow(wrapped)
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt
index 98d16113ff9..59c6f896c86 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt
@@ -15,6 +15,7 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListCollectionsPublisher
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
@@ -54,6 +55,7 @@ class ListCollectionsFlowTest {
         flow.filter(filter)
         flow.maxTime(1)
         flow.maxTime(1, TimeUnit.SECONDS)
+        flow.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).batchSize(batchSize)
         verify(wrapped).comment(bsonComment)
@@ -61,6 +63,7 @@ class ListCollectionsFlowTest {
         verify(wrapped).filter(filter)
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt
index 53e44f740f1..eac18960b3f 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt
@@ -15,6 +15,7 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListDatabasesPublisher
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
@@ -55,6 +56,7 @@ class ListDatabasesFlowTest {
         flow.maxTime(1)
         flow.maxTime(1, TimeUnit.SECONDS)
         flow.nameOnly(true)
+        flow.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).authorizedDatabasesOnly(true)
         verify(wrapped).batchSize(batchSize)
@@ -64,6 +66,7 @@ class ListDatabasesFlowTest {
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
         verify(wrapped).nameOnly(true)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt
index 69287d1918d..d84765d428b 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt
@@ -15,6 +15,7 @@
  */
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListIndexesPublisher
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
@@ -50,12 +51,14 @@ class ListIndexesFlowTest {
         flow.comment(comment)
         flow.maxTime(1)
         flow.maxTime(1, TimeUnit.SECONDS)
+        flow.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).batchSize(batchSize)
         verify(wrapped).comment(bsonComment)
         verify(wrapped).comment(comment)
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt
index 440566fcae8..b9ef9133e87 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt
@@ -17,6 +17,7 @@
 
 package com.mongodb.kotlin.client.coroutine
 
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.client.model.MapReduceAction
 import com.mongodb.reactivestreams.client.MapReducePublisher
@@ -71,6 +72,7 @@ class MapReduceFlowTest {
         flow.sort(bson)
         flow.verbose(true)
         flow.action(MapReduceAction.MERGE)
+        flow.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).batchSize(batchSize)
         verify(wrapped).bypassDocumentValidation(true)
@@ -87,6 +89,7 @@ class MapReduceFlowTest {
         verify(wrapped).sort(bson)
         verify(wrapped).verbose(true)
         verify(wrapped).action(MapReduceAction.MERGE)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         whenever(wrapped.toCollection()).doReturn(Mono.empty())
         runBlocking { flow.toCollection() }
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt
index e8e121f85dc..7be5c068a84 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt
@@ -72,7 +72,16 @@ class MongoCollectionTest {
     fun shouldHaveTheSameMethods() {
         val jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet()
         val kMongoCollectionFunctions =
-            MongoCollection::class.declaredFunctions.map { it.name }.toSet() +
+            MongoCollection::class
+                .declaredFunctions
+                .map {
+                    if (it.name == "timeout") {
+                        "getTimeout"
+                    } else {
+                        it.name
+                    }
+                }
+                .toSet() +
                 MongoCollection::class
                     .declaredMemberProperties
                     .filterNot { it.name == "wrapped" }
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt
index 4ba7502bd24..031e2e6d1ef 100644
--- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt
@@ -54,7 +54,16 @@ class MongoDatabaseTest {
     fun shouldHaveTheSameMethods() {
         val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet()
         val kMongoDatabaseFunctions =
-            MongoDatabase::class.declaredFunctions.map { it.name }.toSet() +
+            MongoDatabase::class
+                .declaredFunctions
+                .map {
+                    if (it.name == "timeout") {
+                        "getTimeout"
+                    } else {
+                        it.name
+                    }
+                }
+                .toSet() +
                 MongoDatabase::class
                     .declaredMemberProperties
                     .filterNot { it.name == "wrapped" }
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt
index 2640e6250d7..b563c67c368 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt
@@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.syncadapter
 
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.AggregateIterable as JAggregateIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.AggregateIterable
 import java.util.concurrent.TimeUnit
@@ -27,6 +28,9 @@ import org.bson.conversions.Bson
 internal class SyncAggregateIterable<T : Any>(val wrapped: AggregateIterable<T>) :
     JAggregateIterable<T>, SyncMongoIterable<T>(wrapped) {
     override fun batchSize(batchSize: Int): SyncAggregateIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncAggregateIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 
     override fun toCollection() = wrapped.toCollection()
 
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt
index 53d791bd423..64cd27b776f 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt
@@ -20,6 +20,7 @@ import com.mongodb.ServerAddress
 import com.mongodb.TransactionOptions
 import com.mongodb.client.ClientSession as JClientSession
 import com.mongodb.client.TransactionBody
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.kotlin.client.ClientSession
 import com.mongodb.session.ServerSession
 import org.bson.BsonDocument
@@ -90,4 +91,6 @@ internal class SyncClientSession(internal val wrapped: ClientSession, private va
 
     override fun <T : Any> withTransaction(transactionBody: TransactionBody<T>, options: TransactionOptions): T =
         throw UnsupportedOperationException()
+
+    override fun getTimeoutContext(): TimeoutContext = throw UnsupportedOperationException()
 }
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt
index ef580954e20..91cf8165a3a 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.syncadapter
 
 import com.mongodb.client.DistinctIterable as JDistinctIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.DistinctIterable
 import java.util.concurrent.TimeUnit
@@ -25,6 +26,9 @@ import org.bson.conversions.Bson
 internal class SyncDistinctIterable<T : Any>(val wrapped: DistinctIterable<T>) :
     JDistinctIterable<T>, SyncMongoIterable<T>(wrapped) {
     override fun batchSize(batchSize: Int): SyncDistinctIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncDistinctIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
     override fun filter(filter: Bson?): SyncDistinctIterable<T> = apply { wrapped.filter(filter) }
     override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncDistinctIterable<T> = apply {
         wrapped.maxTime(maxTime, timeUnit)
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt
index f179f4ff6bc..81247aeb2a0 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt
@@ -18,6 +18,7 @@ package com.mongodb.kotlin.client.syncadapter
 import com.mongodb.CursorType
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.FindIterable as JFindIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.FindIterable
 import java.util.concurrent.TimeUnit
@@ -28,6 +29,7 @@ import org.bson.conversions.Bson
 internal class SyncFindIterable<T : Any>(val wrapped: FindIterable<T>) :
     JFindIterable<T>, SyncMongoIterable<T>(wrapped) {
     override fun batchSize(batchSize: Int): SyncFindIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncFindIterable<T> = apply { wrapped.timeoutMode(timeoutMode) }
     override fun filter(filter: Bson?): SyncFindIterable<T> = apply { wrapped.filter(filter) }
 
     override fun limit(limit: Int): SyncFindIterable<T> = apply { wrapped.limit(limit) }
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt
index 74579b15a20..f38e7eed5e7 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.syncadapter
 
 import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.kotlin.client.ListCollectionsIterable
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -25,6 +26,9 @@ internal class SyncListCollectionsIterable<T : Any>(val wrapped: ListCollections
     JListCollectionsIterable<T>, SyncMongoIterable<T>(wrapped) {
 
     override fun batchSize(batchSize: Int): SyncListCollectionsIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListCollectionsIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 
     override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionsIterable<T> = apply {
         wrapped.maxTime(maxTime, timeUnit)
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt
index 2e0e662a65d..34874827826 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.syncadapter
 
 import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.kotlin.client.ListDatabasesIterable
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -25,6 +26,9 @@ internal class SyncListDatabasesIterable<T : Any>(val wrapped: ListDatabasesIter
     JListDatabasesIterable<T>, SyncMongoIterable<T>(wrapped) {
 
     override fun batchSize(batchSize: Int): SyncListDatabasesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListDatabasesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
 
     override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListDatabasesIterable<T> = apply {
         wrapped.maxTime(maxTime, timeUnit)
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt
index b9133970cb3..56e5fec91cd 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client.syncadapter
 
 import com.mongodb.client.ListIndexesIterable as JListIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.kotlin.client.ListIndexesIterable
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -23,6 +24,9 @@ import org.bson.BsonValue
 internal class SyncListIndexesIterable<T : Any>(val wrapped: ListIndexesIterable<T>) :
     JListIndexesIterable<T>, SyncMongoIterable<T>(wrapped) {
     override fun batchSize(batchSize: Int): SyncListIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
     override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListIndexesIterable<T> = apply {
         wrapped.maxTime(maxTime, timeUnit)
     }
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt
index c63a249eeb0..b0e6d522b7e 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt
@@ -17,6 +17,7 @@ package com.mongodb.kotlin.client.syncadapter
 
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.kotlin.client.ListSearchIndexesIterable
 import java.util.concurrent.TimeUnit
@@ -26,6 +27,9 @@ import org.bson.Document
 internal class SyncListSearchIndexesIterable<T : Any>(val wrapped: ListSearchIndexesIterable<T>) :
     JListSearchIndexesIterable<T>, SyncMongoIterable<T>(wrapped) {
     override fun batchSize(batchSize: Int): SyncListSearchIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
     override fun name(indexName: String): SyncListSearchIndexesIterable<T> = apply { wrapped.name(indexName) }
 
     override fun allowDiskUse(allowDiskUse: Boolean?): com.mongodb.client.ListSearchIndexesIterable<T> = apply {
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt
index 9c3af8af290..16660562a33 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt
@@ -15,75 +15,12 @@
  */
 package com.mongodb.kotlin.client.syncadapter
 
-import com.mongodb.ClientSessionOptions
-import com.mongodb.client.ChangeStreamIterable
-import com.mongodb.client.ClientSession
-import com.mongodb.client.ListDatabasesIterable
 import com.mongodb.client.MongoClient as JMongoClient
-import com.mongodb.client.MongoDatabase
-import com.mongodb.client.MongoIterable
 import com.mongodb.connection.ClusterDescription
 import com.mongodb.kotlin.client.MongoClient
-import org.bson.Document
-import org.bson.conversions.Bson
 
-internal class SyncMongoClient(val wrapped: MongoClient) : JMongoClient {
+internal class SyncMongoClient(override val wrapped: MongoClient) : SyncMongoCluster(wrapped), JMongoClient {
     override fun close(): Unit = wrapped.close()
 
-    override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName))
-
-    override fun startSession(): ClientSession = SyncClientSession(wrapped.startSession(), this)
-
-    override fun startSession(options: ClientSessionOptions): ClientSession =
-        SyncClientSession(wrapped.startSession(options), this)
-
-    override fun listDatabaseNames(): MongoIterable<String> = SyncMongoIterable(wrapped.listDatabaseNames())
-
-    override fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
-        SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped()))
-
-    override fun listDatabases(): ListDatabasesIterable<Document> = SyncListDatabasesIterable(wrapped.listDatabases())
-
-    override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
-        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped()))
-
-    override fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
-        SyncListDatabasesIterable(wrapped.listDatabases(resultClass))
-
-    override fun <T : Any> listDatabases(
-        clientSession: ClientSession,
-        resultClass: Class<T>
-    ): ListDatabasesIterable<T> =
-        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass))
-
-    override fun watch(): ChangeStreamIterable<Document> = SyncChangeStreamIterable(wrapped.watch())
-
-    override fun <T : Any> watch(resultClass: Class<T>): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass))
-
-    override fun watch(pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
-        SyncChangeStreamIterable(wrapped.watch(pipeline))
-
-    override fun <T : Any> watch(pipeline: MutableList<out Bson>, resultClass: Class<T>): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass))
-
-    override fun watch(clientSession: ClientSession): ChangeStreamIterable<Document> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped()))
-
-    override fun <T : Any> watch(clientSession: ClientSession, resultClass: Class<T>): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass))
-
-    override fun watch(clientSession: ClientSession, pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline))
-
-    override fun <T : Any> watch(
-        clientSession: ClientSession,
-        pipeline: MutableList<out Bson>,
-        resultClass: Class<T>
-    ): ChangeStreamIterable<T> =
-        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass))
-
     override fun getClusterDescription(): ClusterDescription = wrapped.clusterDescription
-
-    private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped
 }
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt
new file mode 100644
index 00000000000..7b948fa6d1d
--- /dev/null
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.client.ChangeStreamIterable
+import com.mongodb.client.ClientSession
+import com.mongodb.client.ListDatabasesIterable
+import com.mongodb.client.MongoCluster as JMongoCluster
+import com.mongodb.client.MongoDatabase
+import com.mongodb.client.MongoIterable
+import com.mongodb.kotlin.client.MongoCluster
+import java.util.concurrent.TimeUnit
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
+internal open class SyncMongoCluster(open val wrapped: MongoCluster) : JMongoCluster {
+    override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry
+
+    override fun getReadPreference(): ReadPreference = wrapped.readPreference
+
+    override fun getWriteConcern(): WriteConcern = wrapped.writeConcern
+
+    override fun getReadConcern(): ReadConcern = wrapped.readConcern
+
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
+
+    override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry))
+
+    override fun withReadPreference(readPreference: ReadPreference): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withReadPreference(readPreference))
+
+    override fun withReadConcern(readConcern: ReadConcern): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withReadConcern(readConcern))
+
+    override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withWriteConcern(writeConcern))
+
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit))
+
+    override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName))
+
+    override fun startSession(): ClientSession = SyncClientSession(wrapped.startSession(), this)
+
+    override fun startSession(options: ClientSessionOptions): ClientSession =
+        SyncClientSession(wrapped.startSession(options), this)
+
+    override fun listDatabaseNames(): MongoIterable<String> = SyncMongoIterable(wrapped.listDatabaseNames())
+
+    override fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
+        SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped()))
+
+    override fun listDatabases(): ListDatabasesIterable<Document> = SyncListDatabasesIterable(wrapped.listDatabases())
+
+    override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
+        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped()))
+
+    override fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
+        SyncListDatabasesIterable(wrapped.listDatabases(resultClass))
+
+    override fun <T : Any> listDatabases(
+        clientSession: ClientSession,
+        resultClass: Class<T>
+    ): ListDatabasesIterable<T> =
+        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass))
+
+    override fun watch(): ChangeStreamIterable<Document> = SyncChangeStreamIterable(wrapped.watch())
+
+    override fun <T : Any> watch(resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass))
+
+    override fun watch(pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline))
+
+    override fun <T : Any> watch(pipeline: MutableList<out Bson>, resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass))
+
+    override fun watch(clientSession: ClientSession): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped()))
+
+    override fun <T : Any> watch(clientSession: ClientSession, resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass))
+
+    override fun watch(clientSession: ClientSession, pipeline: MutableList<out Bson>): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline))
+
+    override fun <T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: MutableList<out Bson>,
+        resultClass: Class<T>
+    ): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass))
+
+    private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped
+}
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt
index 952b05d32e5..51c3a7db7e1 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt
@@ -56,6 +56,7 @@ import com.mongodb.client.result.InsertOneResult
 import com.mongodb.client.result.UpdateResult
 import com.mongodb.kotlin.client.MongoCollection
 import java.lang.UnsupportedOperationException
+import java.util.concurrent.TimeUnit
 import org.bson.Document
 import org.bson.codecs.configuration.CodecRegistry
 import org.bson.conversions.Bson
@@ -73,6 +74,7 @@ internal class SyncMongoCollection<T : Any>(val wrapped: MongoCollection<T>) : J
     override fun getWriteConcern(): WriteConcern = wrapped.writeConcern
 
     override fun getReadConcern(): ReadConcern = wrapped.readConcern
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
 
     override fun <R : Any> withDocumentClass(clazz: Class<R>): SyncMongoCollection<R> =
         SyncMongoCollection(wrapped.withDocumentClass(clazz))
@@ -89,6 +91,9 @@ internal class SyncMongoCollection<T : Any>(val wrapped: MongoCollection<T>) : J
     override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection<T> =
         SyncMongoCollection(wrapped.withReadConcern(readConcern))
 
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection<T> =
+        SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit))
+
     override fun countDocuments(): Long = wrapped.countDocuments()
 
     override fun countDocuments(filter: Bson): Long = wrapped.countDocuments(filter)
diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt
index 84a97bc2769..1111ee282ca 100644
--- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt
+++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt
@@ -23,6 +23,7 @@ import com.mongodb.client.MongoDatabase as JMongoDatabase
 import com.mongodb.client.model.CreateCollectionOptions
 import com.mongodb.client.model.CreateViewOptions
 import com.mongodb.kotlin.client.MongoDatabase
+import java.util.concurrent.TimeUnit
 import org.bson.Document
 import org.bson.codecs.configuration.CodecRegistry
 import org.bson.conversions.Bson
@@ -38,6 +39,8 @@ internal class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase {
 
     override fun getReadConcern(): ReadConcern = wrapped.readConcern
 
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
+
     override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase =
         SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry))
 
@@ -50,6 +53,9 @@ internal class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase {
     override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase =
         SyncMongoDatabase(wrapped.withReadConcern(readConcern))
 
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase =
+        SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit))
+
     override fun getCollection(collectionName: String): MongoCollection<Document> =
         SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java))
 
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt
index 4940cad99d0..b5449a14645 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt
@@ -16,7 +16,10 @@
 package com.mongodb.kotlin.client
 
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.AggregateIterable as JAggregateIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -30,14 +33,32 @@ import org.bson.conversions.Bson
  * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate)
  */
 public class AggregateIterable<T : Any>(private val wrapped: JAggregateIterable<T>) : MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): AggregateIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
     /**
-     * Sets the number of documents to return per batch.
+     * Sets the timeoutMode for the cursor.
      *
-     * @param batchSize the batch size
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * If the `timeout` is set then:
+     * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME]
+     * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and it's an error to
+     *   configure it as: [TimeoutMode.CURSOR_LIFETIME]
+     *
+     * @param timeoutMode the timeout mode
      * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     * @since 5.2
      */
-    public override fun batchSize(batchSize: Int): AggregateIterable<T> = apply { wrapped.batchSize(batchSize) }
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): AggregateIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
 
     /**
      * Aggregates documents according to the specified aggregation pipeline, which must end with a $out or $merge stage.
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt
index 95660682f0b..cf7cc35b0b0 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt
@@ -37,6 +37,11 @@ import org.bson.BsonValue
 public class ChangeStreamIterable<T : Any>(private val wrapped: JChangeStreamIterable<T>) :
     MongoIterable<ChangeStreamDocument<T>>(wrapped) {
 
+    public override fun batchSize(batchSize: Int): ChangeStreamIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
     /**
      * Returns a cursor used for iterating over elements of type {@code ChangeStreamDocument<TResult>}. The cursor has a
      * covariant return type to additionally provide a method to access the resume token in change stream batches.
@@ -77,15 +82,6 @@ public class ChangeStreamIterable<T : Any>(private val wrapped: JChangeStreamIte
         wrapped.resumeAfter(resumeToken)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public override fun batchSize(batchSize: Int): ChangeStreamIterable<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the maximum await execution time on the server for this operation.
      *
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt
index de77215d033..f785eeca7e4 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt
@@ -15,7 +15,10 @@
  */
 package com.mongodb.kotlin.client
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.DistinctIterable as JDistinctIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -28,6 +31,7 @@ import org.bson.conversions.Bson
  * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/)
  */
 public class DistinctIterable<T : Any?>(private val wrapped: JDistinctIterable<T>) : MongoIterable<T>(wrapped) {
+
     /**
      * Sets the number of documents to return per batch.
      *
@@ -37,6 +41,19 @@ public class DistinctIterable<T : Any?>(private val wrapped: JDistinctIterable<T
      */
     public override fun batchSize(batchSize: Int): DistinctIterable<T> = apply { wrapped.batchSize(batchSize) }
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): DistinctIterable<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
     /**
      * Sets the query filter to apply to the query.
      *
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt
index 2a33cb6f268..81e1bb51864 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt
@@ -17,7 +17,10 @@ package com.mongodb.kotlin.client
 
 import com.mongodb.CursorType
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.FindIterable as JFindIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -31,14 +34,32 @@ import org.bson.conversions.Bson
  * @see [Collection filter](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/)
  */
 public class FindIterable<T : Any>(private val wrapped: JFindIterable<T>) : MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): FindIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
     /**
-     * Sets the number of documents to return per batch.
+     * Sets the timeoutMode for the cursor.
      *
-     * @param batchSize the batch size
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * If the `timeout` is set then:
+     * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME]
+     * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and it's an error to configure
+     *   it as: [TimeoutMode.CURSOR_LIFETIME]
+     *
+     * @param timeoutMode the timeout mode
      * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     * @since 5.2
      */
-    public override fun batchSize(batchSize: Int): FindIterable<T> = apply { wrapped.batchSize(batchSize) }
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): FindIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
 
     /**
      * Sets the query filter to apply to the query.
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt
index 6ff8bc9c3fa..43b2a9ba510 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt
@@ -15,7 +15,10 @@
  */
 package com.mongodb.kotlin.client
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable
+import com.mongodb.client.cursor.TimeoutMode
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
 import org.bson.conversions.Bson
@@ -28,6 +31,28 @@ import org.bson.conversions.Bson
  */
 public class ListCollectionsIterable<T : Any>(private val wrapped: JListCollectionsIterable<T>) :
     MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): ListCollectionsIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListCollectionsIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
+
     /**
      * Sets the maximum execution time on the server for this operation.
      *
@@ -40,15 +65,6 @@ public class ListCollectionsIterable<T : Any>(private val wrapped: JListCollecti
         wrapped.maxTime(maxTime, timeUnit)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public override fun batchSize(batchSize: Int): ListCollectionsIterable<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the query filter to apply to the returned database names.
      *
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt
index 560920b5e0d..dd9e1e0bcc8 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt
@@ -15,7 +15,10 @@
  */
 package com.mongodb.kotlin.client
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
 import org.bson.conversions.Bson
@@ -28,6 +31,28 @@ import org.bson.conversions.Bson
  */
 public class ListDatabasesIterable<T : Any>(private val wrapped: JListDatabasesIterable<T>) :
     MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): ListDatabasesIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListDatabasesIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
+
     /**
      * Sets the maximum execution time on the server for this operation.
      *
@@ -40,15 +65,6 @@ public class ListDatabasesIterable<T : Any>(private val wrapped: JListDatabasesI
         wrapped.maxTime(maxTime, timeUnit)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public override fun batchSize(batchSize: Int): ListDatabasesIterable<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the query filter to apply to the returned database names.
      *
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt
index 36847cb49d8..cc4449384b8 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt
@@ -15,7 +15,10 @@
  */
 package com.mongodb.kotlin.client
 
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.ListIndexesIterable as JListIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
 
@@ -26,6 +29,28 @@ import org.bson.BsonValue
  * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
  */
 public class ListIndexesIterable<T : Any>(private val wrapped: JListIndexesIterable<T>) : MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): ListIndexesIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListIndexesIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
+
     /**
      * Sets the maximum execution time on the server for this operation.
      *
@@ -38,15 +63,6 @@ public class ListIndexesIterable<T : Any>(private val wrapped: JListIndexesItera
         wrapped.maxTime(maxTime, timeUnit)
     }
 
-    /**
-     * Sets the number of documents to return per batch.
-     *
-     * @param batchSize the batch size
-     * @return this
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
-     */
-    public override fun batchSize(batchSize: Int): ListIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
-
     /**
      * Sets the comment for this operation. A null value means no comment is set.
      *
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt
index 5b370702923..aa0dc1664bd 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt
@@ -16,7 +16,10 @@
 package com.mongodb.kotlin.client
 
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import java.util.concurrent.TimeUnit
 import org.bson.BsonValue
@@ -31,22 +34,34 @@ import org.bson.Document
 public class ListSearchIndexesIterable<T : Any>(private val wrapped: JListSearchIndexesIterable<T>) :
     MongoIterable<T>(wrapped) {
 
+    public override fun batchSize(batchSize: Int): ListSearchIndexesIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
     /**
-     * Sets an Atlas Search index name for this operation.
+     * Sets the timeoutMode for the cursor.
      *
-     * @param indexName Atlas Search index name.
-     * @return this.
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
      */
-    public fun name(indexName: String): ListSearchIndexesIterable<T> = apply { wrapped.name(indexName) }
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
 
     /**
-     * Sets the number of documents to return per batch.
+     * Sets an Atlas Search index name for this operation.
      *
-     * @param batchSize the batch size.
+     * @param indexName Atlas Search index name.
      * @return this.
-     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
      */
-    public override fun batchSize(batchSize: Int): ListSearchIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    public fun name(indexName: String): ListSearchIndexesIterable<T> = apply { wrapped.name(indexName) }
 
     /**
      * Enables writing to temporary files. A null value indicates that it's unspecified.
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt
index 4cae28c973f..bdf2ba30bd5 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt
@@ -23,8 +23,7 @@ import com.mongodb.client.MongoClient as JMongoClient
 import com.mongodb.client.MongoClients as JMongoClients
 import com.mongodb.connection.ClusterDescription
 import java.io.Closeable
-import org.bson.Document
-import org.bson.conversions.Bson
+import java.util.concurrent.TimeUnit
 
 /**
  * A client-side representation of a MongoDB cluster.
@@ -38,7 +37,7 @@ import org.bson.conversions.Bson
  *
  * @see MongoClient.create
  */
-public class MongoClient(private val wrapped: JMongoClient) : Closeable {
+public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapped), Closeable {
 
     /**
      * A factory for [MongoClient] instances.
@@ -108,175 +107,13 @@ public class MongoClient(private val wrapped: JMongoClient) : Closeable {
      */
     public val clusterDescription: ClusterDescription
         get() = wrapped.clusterDescription
-
-    /**
-     * Gets a [MongoDatabase] instance for the given database name.
-     *
-     * @param databaseName the name of the database to retrieve
-     * @return a `MongoDatabase` representing the specified database
-     * @throws IllegalArgumentException if databaseName is invalid
-     * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity
-     */
-    public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName))
-
-    /**
-     * Creates a client session.
-     *
-     * Note: A ClientSession instance can not be used concurrently in multiple operations.
-     *
-     * @param options the options for the client session
-     * @return the client session
-     */
-    public fun startSession(options: ClientSessionOptions = ClientSessionOptions.builder().build()): ClientSession =
-        ClientSession(wrapped.startSession(options))
-
-    /**
-     * Get a list of the database names
-     *
-     * @return an iterable containing all the names of all the databases
-     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
-     */
-    public fun listDatabaseNames(): MongoIterable<String> = MongoIterable(wrapped.listDatabaseNames())
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the list databases iterable interface
-     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
-     */
-    public fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
-        MongoIterable(wrapped.listDatabaseNames(clientSession.wrapped))
-
-    /**
-     * Gets the list of databases
-     *
-     * @return the list databases iterable interface
-     */
-    @JvmName("listDatabasesAsDocument")
-    public fun listDatabases(): ListDatabasesIterable<Document> = listDatabases<Document>()
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the list databases iterable interface
-     */
-    @JvmName("listDatabasesAsDocumentWithSession")
-    public fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
-        listDatabases<Document>(clientSession)
-
-    /**
-     * Gets the list of databases
-     *
-     * @param T the type of the class to use
-     * @param resultClass the target document type of the iterable.
-     * @return the list databases iterable interface
-     */
-    public fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
-        ListDatabasesIterable(wrapped.listDatabases(resultClass))
-
-    /**
-     * Gets the list of databases
-     *
-     * @param T the type of the class to use
-     * @param clientSession the client session with which to associate this operation
-     * @param resultClass the target document type of the iterable.
-     * @return the list databases iterable interface
-     */
-    public fun <T : Any> listDatabases(clientSession: ClientSession, resultClass: Class<T>): ListDatabasesIterable<T> =
-        ListDatabasesIterable(wrapped.listDatabases(clientSession.wrapped, resultClass))
-
-    /**
-     * Gets the list of databases
-     *
-     * @param T the type of the class to use
-     * @return the list databases iterable interface
-     */
-    public inline fun <reified T : Any> listDatabases(): ListDatabasesIterable<T> = listDatabases(T::class.java)
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param T the type of the class to use
-     * @return the list databases iterable interface
-     */
-    public inline fun <reified T : Any> listDatabases(clientSession: ClientSession): ListDatabasesIterable<T> =
-        listDatabases(clientSession, T::class.java)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    @JvmName("watchAsDocument")
-    public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> = watch<Document>(pipeline)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    @JvmName("watchAsDocumentWithSession")
-    public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> =
-        watch<Document>(clientSession, pipeline)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @param resultClass the target document type of the iterable.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamIterable<T> =
-        ChangeStreamIterable(wrapped.watch(pipeline, resultClass))
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @param resultClass the target document type of the iterable.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public fun <T : Any> watch(
-        clientSession: ClientSession,
-        pipeline: List<Bson> = emptyList(),
-        resultClass: Class<T>
-    ): ChangeStreamIterable<T> = ChangeStreamIterable(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<T> =
-        watch(pipeline, T::class.java)
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param T the target document type of the iterable.
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
-     * @return the change stream iterable
-     * @see [Change Streams](https://dochub.mongodb.org/changestreams]
-     */
-    public inline fun <reified T : Any> watch(
-        clientSession: ClientSession,
-        pipeline: List<Bson> = emptyList()
-    ): ChangeStreamIterable<T> = watch(clientSession, pipeline, T::class.java)
 }
+
+/**
+ * ClientSessionOptions.Builder.defaultTimeout extension function
+ *
+ * @param defaultTimeout time in milliseconds
+ * @return the options
+ */
+public fun ClientSessionOptions.Builder.defaultTimeout(defaultTimeout: Long): ClientSessionOptions.Builder =
+    this.apply { defaultTimeout(defaultTimeout, TimeUnit.MILLISECONDS) }
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt
new file mode 100644
index 00000000000..f541aaf1a9f
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.MongoCluster as JMongoCluster
+import java.util.concurrent.TimeUnit
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
+/**
+ * The client-side representation of the operations available on a MongoDB cluster.
+ *
+ * The originating [MongoClient] is responsible for closing its resources. If the originating [MongoClient] is closed,
+ * then any operations will fail.
+ *
+ * @see MongoClient
+ * @since 5.2
+ */
+public open class MongoCluster protected constructor(private val wrapped: JMongoCluster) {
+
+    /** The codec registry. */
+    public val codecRegistry: CodecRegistry
+        get() = wrapped.codecRegistry
+
+    /** The read concern. */
+    public val readConcern: ReadConcern
+        get() = wrapped.readConcern
+
+    /** The read preference. */
+    public val readPreference: ReadPreference
+        get() = wrapped.readPreference
+
+    /** The write concern. */
+    public val writeConcern: WriteConcern
+        get() = wrapped.writeConcern
+
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+     * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+     * - `null` means that the timeout mechanism for operations will defer to using:
+     *     - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *       become available
+     *     - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+     *     - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+     *     - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+     *       [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+     *     - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+     * - `0` means infinite timeout.
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @return the optional timeout duration
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
+    /**
+     * Create a new MongoCluster instance with a different codec registry.
+     *
+     * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of
+     * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining
+     * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances
+     * using the factory methods in [org.bson.codecs.configuration.CodecRegistries].
+     *
+     * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database
+     * @return a new MongoCluster instance with the different codec registry
+     * @see org.bson.codecs.configuration.CodecRegistries
+     */
+    public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCluster =
+        MongoCluster(wrapped.withCodecRegistry(newCodecRegistry))
+
+    /**
+     * Create a new MongoCluster instance with a different read preference.
+     *
+     * @param newReadPreference the new [ReadPreference] for the database
+     * @return a new MongoCluster instance with the different readPreference
+     */
+    public fun withReadPreference(newReadPreference: ReadPreference): MongoCluster =
+        MongoCluster(wrapped.withReadPreference(newReadPreference))
+
+    /**
+     * Create a new MongoCluster instance with a different read concern.
+     *
+     * @param newReadConcern the new [ReadConcern] for the database
+     * @return a new MongoCluster instance with the different ReadConcern
+     * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+     */
+    public fun withReadConcern(newReadConcern: ReadConcern): MongoCluster =
+        MongoCluster(wrapped.withReadConcern(newReadConcern))
+
+    /**
+     * Create a new MongoCluster instance with a different write concern.
+     *
+     * @param newWriteConcern the new [WriteConcern] for the database
+     * @return a new MongoCluster instance with the different writeConcern
+     */
+    public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCluster =
+        MongoCluster(wrapped.withWriteConcern(newWriteConcern))
+
+    /**
+     * Create a new MongoCluster instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return a new MongoCluster instance with the set time limit for operations
+     * @see [MongoDatabase.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCluster =
+        MongoCluster(wrapped.withTimeout(timeout, timeUnit))
+
+    /**
+     * Gets a [MongoDatabase] instance for the given database name.
+     *
+     * @param databaseName the name of the database to retrieve
+     * @return a `MongoDatabase` representing the specified database
+     * @throws IllegalArgumentException if databaseName is invalid
+     * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity
+     */
+    public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName))
+
+    /**
+     * Creates a client session.
+     *
+     * Note: A ClientSession instance can not be used concurrently in multiple operations.
+     *
+     * @param options the options for the client session
+     * @return the client session
+     */
+    public fun startSession(options: ClientSessionOptions = ClientSessionOptions.builder().build()): ClientSession =
+        ClientSession(wrapped.startSession(options))
+
+    /**
+     * Get a list of the database names
+     *
+     * @return an iterable containing all the names of all the databases
+     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+     */
+    public fun listDatabaseNames(): MongoIterable<String> = MongoIterable(wrapped.listDatabaseNames())
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list databases iterable interface
+     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+     */
+    public fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
+        MongoIterable(wrapped.listDatabaseNames(clientSession.wrapped))
+
+    /**
+     * Gets the list of databases
+     *
+     * @return the list databases iterable interface
+     */
+    @JvmName("listDatabasesAsDocument")
+    public fun listDatabases(): ListDatabasesIterable<Document> = listDatabases<Document>()
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list databases iterable interface
+     */
+    @JvmName("listDatabasesAsDocumentWithSession")
+    public fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
+        listDatabases<Document>(clientSession)
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @param resultClass the target document type of the iterable.
+     * @return the list databases iterable interface
+     */
+    public fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
+        ListDatabasesIterable(wrapped.listDatabases(resultClass))
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the target document type of the iterable.
+     * @return the list databases iterable interface
+     */
+    public fun <T : Any> listDatabases(clientSession: ClientSession, resultClass: Class<T>): ListDatabasesIterable<T> =
+        ListDatabasesIterable(wrapped.listDatabases(clientSession.wrapped, resultClass))
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @return the list databases iterable interface
+     */
+    public inline fun <reified T : Any> listDatabases(): ListDatabasesIterable<T> = listDatabases(T::class.java)
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param T the type of the class to use
+     * @return the list databases iterable interface
+     */
+    public inline fun <reified T : Any> listDatabases(clientSession: ClientSession): ListDatabasesIterable<T> =
+        listDatabases(clientSession, T::class.java)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocument")
+    public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> = watch<Document>(pipeline)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocumentWithSession")
+    public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> =
+        watch<Document>(clientSession, pipeline)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamIterable<T> =
+        ChangeStreamIterable(wrapped.watch(pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList(),
+        resultClass: Class<T>
+    ): ChangeStreamIterable<T> = ChangeStreamIterable(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<T> =
+        watch(pipeline, T::class.java)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList()
+    ): ChangeStreamIterable<T> = watch(clientSession, pipeline, T::class.java)
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt
index 786140caf12..9521c502460 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt
@@ -19,6 +19,8 @@ import com.mongodb.MongoNamespace
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.bulk.BulkWriteResult
 import com.mongodb.client.MongoCollection as JMongoCollection
 import com.mongodb.client.model.BulkWriteOptions
@@ -84,6 +86,28 @@ public class MongoCollection<T : Any>(private val wrapped: JMongoCollection<T>)
     public val writeConcern: WriteConcern
         get() = wrapped.writeConcern
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+     * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+     * - `null` means that the timeout mechanism for operations will defer to using:
+     *     - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *       become available
+     *     - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+     *     - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+     *     - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+     *       [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+     *     - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+     * - `0` means infinite timeout.
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @return the optional timeout duration
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
     /**
      * Create a new collection instance with a different default class to cast any documents returned from the database
      * into.
@@ -147,6 +171,21 @@ public class MongoCollection<T : Any>(private val wrapped: JMongoCollection<T>)
     public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection<T> =
         MongoCollection(wrapped.withWriteConcern(newWriteConcern))
 
+    /**
+     * Create a new MongoCollection instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to milliseconds
+     * @return a new MongoCollection instance with the set time limit for operations
+     * @see [MongoCollection.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection<T> =
+        MongoCollection(wrapped.withTimeout(timeout, timeUnit))
+
     /**
      * Counts the number of documents in the collection.
      *
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt
index b407195b079..714e82fa78e 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt
@@ -76,6 +76,14 @@ public sealed interface MongoCursor<T : Any?> : Iterator<T>, Closeable {
  *  }
  * ```
  *
+ * A [com.mongodb.MongoOperationTimeoutException] does not invalidate the [MongoChangeStreamCursor], but is immediately
+ * propagated to the caller. Subsequent method calls will attempt to resume operation by establishing a new change
+ * stream on the server, without performing a `getMore` request first.
+ *
+ * If a [com.mongodb.MongoOperationTimeoutException] occurs before any events are received, it indicates that the server
+ * has timed out before it could finish processing the existing oplog. In such cases, it is recommended to close the
+ * current stream and recreate it with a higher timeout setting.
+ *
  * @param T The type of documents the cursor contains
  */
 public sealed interface MongoChangeStreamCursor<T : Any> : MongoCursor<T> {
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt
index 988db01485a..d59ba628008 100644
--- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt
@@ -18,6 +18,8 @@ package com.mongodb.kotlin.client
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
 import com.mongodb.client.MongoDatabase as JMongoDatabase
 import com.mongodb.client.model.CreateCollectionOptions
 import com.mongodb.client.model.CreateViewOptions
@@ -53,6 +55,28 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     public val writeConcern: WriteConcern
         get() = wrapped.writeConcern
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+     * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+     * - `null` means that the timeout mechanism for operations will defer to using:
+     *     - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *       become available
+     *     - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+     *     - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+     *     - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+     *       [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+     *     - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+     * - `0` means infinite timeout.
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @return the optional timeout duration
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
     /**
      * Create a new MongoDatabase instance with a different codec registry.
      *
@@ -96,6 +120,21 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase =
         MongoDatabase(wrapped.withWriteConcern(newWriteConcern))
 
+    /**
+     * Create a new MongoDatabase instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to milliseconds
+     * @return a new MongoDatabase instance with the set time limit for operations
+     * @see [MongoDatabase.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase =
+        MongoDatabase(wrapped.withTimeout(timeout, timeUnit))
+
     /**
      * Gets a collection.
      *
@@ -120,6 +159,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param command the command to be run
      * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
      *   [MongoDatabase.readPreference]
@@ -131,6 +173,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command the command to be run
      * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
@@ -146,6 +191,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param command the command to be run
      * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
@@ -162,6 +210,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param clientSession the client session with which to associate this operation
      * @param command the command to be run
@@ -180,6 +231,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param command the command to be run
      * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
@@ -194,6 +248,9 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+     * the `timeoutMS` setting has been set.
+     *
      * @param T the class to decode each document into
      * @param clientSession the client session with which to associate this operation
      * @param command the command to be run
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt
index ce1ed2dea47..89cc8db421e 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt
@@ -17,6 +17,7 @@ package com.mongodb.kotlin.client
 
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.AggregateIterable as JAggregateIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
@@ -79,6 +80,7 @@ class AggregateIterableTest {
         iterable.maxAwaitTime(1, TimeUnit.SECONDS)
         iterable.maxTime(1)
         iterable.maxTime(1, TimeUnit.SECONDS)
+        iterable.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).allowDiskUse(true)
         verify(wrapped).batchSize(batchSize)
@@ -96,6 +98,7 @@ class AggregateIterableTest {
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
         verify(wrapped).let(bson)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         iterable.toCollection()
         verify(wrapped).toCollection()
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt
index 63309969104..c3c4772f9d6 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt
@@ -45,6 +45,7 @@ class ClientSessionTest {
                 "getServerSession",
                 "getSnapshotTimestamp",
                 "getTransactionContext",
+                "getTimeoutContext",
                 "notifyMessageSent",
                 "notifyOperationInitiated",
                 "setRecoveryToken",
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt
index c9fc79e8128..91f5e9b6f44 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client
 
 import com.mongodb.client.DistinctIterable as JDistinctIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
@@ -31,7 +32,8 @@ import org.mockito.kotlin.verifyNoMoreInteractions
 class DistinctIterableTest {
     @Test
     fun shouldHaveTheSameMethods() {
-        val jDistinctIterableFunctions = JDistinctIterable::class.declaredFunctions.map { it.name }.toSet()
+        val jDistinctIterableFunctions =
+            JDistinctIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode"
         val kDistinctIterableFunctions = DistinctIterable::class.declaredFunctions.map { it.name }.toSet()
 
         assertEquals(jDistinctIterableFunctions, kDistinctIterableFunctions)
@@ -55,6 +57,7 @@ class DistinctIterableTest {
         iterable.filter(filter)
         iterable.maxTime(1)
         iterable.maxTime(1, TimeUnit.SECONDS)
+        iterable.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).batchSize(batchSize)
         verify(wrapped).collation(collation)
@@ -63,6 +66,7 @@ class DistinctIterableTest {
         verify(wrapped).filter(filter)
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt
index f0e7698124b..29374ff5c6b 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt
@@ -29,6 +29,7 @@ class ExtensionMethodsTest {
                 "CountOptions",
                 "CreateCollectionOptions",
                 "CreateIndexOptions",
+                "ClientSessionOptions",
                 "DropIndexOptions",
                 "EstimatedDocumentCountOptions",
                 "FindOneAndDeleteOptions",
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt
index 9d8d28104d1..0f4b2725b2e 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt
@@ -18,6 +18,7 @@ package com.mongodb.kotlin.client
 import com.mongodb.CursorType
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.FindIterable as JFindIterable
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
@@ -31,7 +32,7 @@ import org.mockito.kotlin.*
 class FindIterableTest {
     @Test
     fun shouldHaveTheSameMethods() {
-        val jFindIterableFunctions = JFindIterable::class.declaredFunctions.map { it.name }.toSet()
+        val jFindIterableFunctions = JFindIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode"
         val kFindIterableFunctions = FindIterable::class.declaredFunctions.map { it.name }.toSet()
 
         assertEquals(jFindIterableFunctions, kFindIterableFunctions)
@@ -86,6 +87,7 @@ class FindIterableTest {
         iterable.showRecordId(true)
         iterable.skip(1)
         iterable.sort(bson)
+        iterable.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).allowDiskUse(true)
         verify(wrapped).batchSize(batchSize)
@@ -114,6 +116,7 @@ class FindIterableTest {
         verify(wrapped).showRecordId(true)
         verify(wrapped).skip(1)
         verify(wrapped).sort(bson)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt
index b0c23b331e4..26dd071768c 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client
 
 import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable
+import com.mongodb.client.cursor.TimeoutMode
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
 import kotlin.test.assertEquals
@@ -53,6 +54,7 @@ class ListCollectionsIterableTest {
         iterable.filter(filter)
         iterable.maxTime(1)
         iterable.maxTime(1, TimeUnit.SECONDS)
+        iterable.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).batchSize(batchSize)
         verify(wrapped).comment(bsonComment)
@@ -60,6 +62,7 @@ class ListCollectionsIterableTest {
         verify(wrapped).filter(filter)
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt
index c10ef133c1d..a1c95cad1a0 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client
 
 import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
 import kotlin.test.assertEquals
@@ -30,7 +31,8 @@ import org.mockito.kotlin.verifyNoMoreInteractions
 class ListDatabasesIterableTest {
     @Test
     fun shouldHaveTheSameMethods() {
-        val jListDatabasesIterableFunctions = JListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet()
+        val jListDatabasesIterableFunctions =
+            JListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode"
         val kListDatabasesIterableFunctions = ListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet()
 
         assertEquals(jListDatabasesIterableFunctions, kListDatabasesIterableFunctions)
@@ -54,6 +56,7 @@ class ListDatabasesIterableTest {
         iterable.maxTime(1)
         iterable.maxTime(1, TimeUnit.SECONDS)
         iterable.nameOnly(true)
+        iterable.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).authorizedDatabasesOnly(true)
         verify(wrapped).batchSize(batchSize)
@@ -63,6 +66,7 @@ class ListDatabasesIterableTest {
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
         verify(wrapped).nameOnly(true)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt
index 70c799eeee4..08bd5b4e685 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt
@@ -16,6 +16,7 @@
 package com.mongodb.kotlin.client
 
 import com.mongodb.client.ListIndexesIterable as JListIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
 import java.util.concurrent.TimeUnit
 import kotlin.reflect.full.declaredFunctions
 import kotlin.test.assertEquals
@@ -29,7 +30,8 @@ import org.mockito.kotlin.verifyNoMoreInteractions
 class ListIndexesIterableTest {
     @Test
     fun shouldHaveTheSameMethods() {
-        val jListIndexesIterableFunctions = JListIndexesIterable::class.declaredFunctions.map { it.name }.toSet()
+        val jListIndexesIterableFunctions =
+            JListIndexesIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode"
         val kListIndexesIterableFunctions = ListIndexesIterable::class.declaredFunctions.map { it.name }.toSet()
 
         assertEquals(jListIndexesIterableFunctions, kListIndexesIterableFunctions)
@@ -49,12 +51,14 @@ class ListIndexesIterableTest {
         iterable.comment(comment)
         iterable.maxTime(1)
         iterable.maxTime(1, TimeUnit.SECONDS)
+        iterable.timeoutMode(TimeoutMode.ITERATION)
 
         verify(wrapped).batchSize(batchSize)
         verify(wrapped).comment(bsonComment)
         verify(wrapped).comment(comment)
         verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS)
         verify(wrapped).maxTime(1, TimeUnit.SECONDS)
+        verify(wrapped).timeoutMode(TimeoutMode.ITERATION)
 
         verifyNoMoreInteractions(wrapped)
     }
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt
index d458c9302ce..e27b7852bba 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt
@@ -71,7 +71,16 @@ class MongoCollectionTest {
     fun shouldHaveTheSameMethods() {
         val jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet() - "mapReduce"
         val kMongoCollectionFunctions =
-            MongoCollection::class.declaredFunctions.map { it.name }.toSet() +
+            MongoCollection::class
+                .declaredFunctions
+                .map {
+                    if (it.name == "timeout") {
+                        "getTimeout"
+                    } else {
+                        it.name
+                    }
+                }
+                .toSet() +
                 MongoCollection::class
                     .declaredMemberProperties
                     .filterNot { it.name == "wrapped" }
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt
index 6a7264545dc..1a7bc1d25c2 100644
--- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt
+++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt
@@ -52,7 +52,16 @@ class MongoDatabaseTest {
     fun shouldHaveTheSameMethods() {
         val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet()
         val kMongoDatabaseFunctions =
-            MongoDatabase::class.declaredFunctions.map { it.name }.toSet() +
+            MongoDatabase::class
+                .declaredFunctions
+                .map {
+                    if (it.name == "timeout") {
+                        "getTimeout"
+                    } else {
+                        it.name
+                    }
+                }
+                .toSet() +
                 MongoDatabase::class
                     .declaredMemberProperties
                     .filterNot { it.name == "wrapped" }
diff --git a/driver-legacy/src/main/com/mongodb/DB.java b/driver-legacy/src/main/com/mongodb/DB.java
index df3a7b41076..7b47cfb8515 100644
--- a/driver-legacy/src/main/com/mongodb/DB.java
+++ b/driver-legacy/src/main/com/mongodb/DB.java
@@ -23,6 +23,7 @@
 import com.mongodb.client.model.DBCreateViewOptions;
 import com.mongodb.client.model.ValidationAction;
 import com.mongodb.client.model.ValidationLevel;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.CommandReadOperation;
 import com.mongodb.internal.operation.CreateCollectionOperation;
@@ -220,11 +221,15 @@ public String getName() {
     public Set<String> getCollectionNames() {
         List<String> collectionNames =
                 new MongoIterableImpl<DBObject>(null, executor, ReadConcern.DEFAULT, primary(),
-                        mongo.getMongoClientOptions().getRetryReads()) {
+                                                mongo.getMongoClientOptions().getRetryReads(), DB.this.getTimeoutSettings()) {
                     @Override
                     public ReadOperation<BatchCursor<DBObject>> asReadOperation() {
-                        return new ListCollectionsOperation<>(name, commandCodec)
-                                .nameOnly(true);
+                        return new ListCollectionsOperation<>(name, commandCodec).nameOnly(true);
+                    }
+
+                    @Override
+                    protected OperationExecutor getExecutor() {
+                        return executor;
                     }
                 }.map(result -> (String) result.get("name")).into(new ArrayList<>());
         Collections.sort(collectionNames);
@@ -304,8 +309,9 @@ public DBCollection createView(final String viewName, final String viewOn, final
         try {
             notNull("options", options);
             DBCollection view = getCollection(viewName);
-            executor.execute(new CreateViewOperation(name, viewName, viewOn, view.preparePipeline(pipeline), writeConcern)
-                                     .collation(options.getCollation()), getReadConcern());
+            executor.execute(new CreateViewOperation(name, viewName, viewOn,
+                    view.preparePipeline(pipeline), writeConcern)
+                    .collation(options.getCollation()), getReadConcern());
             return view;
         } catch (MongoWriteConcernException e) {
             throw createWriteConcernException(e);
@@ -380,7 +386,8 @@ private CreateCollectionOperation getCreateCollectionOperation(final String coll
             validationAction = ValidationAction.fromString((String) options.get("validationAction"));
         }
         Collation collation = DBObjectCollationHelper.createCollationFromOptions(options);
-        return new CreateCollectionOperation(getName(), collectionName, getWriteConcern())
+        return new CreateCollectionOperation(getName(), collectionName,
+                getWriteConcern())
                    .capped(capped)
                    .collation(collation)
                    .sizeInBytes(sizeInBytes)
@@ -513,13 +520,17 @@ public String toString() {
     }
 
     CommandResult executeCommand(final BsonDocument commandDocument, final ReadPreference readPreference) {
-        return new CommandResult(executor.execute(new CommandReadOperation<>(getName(), commandDocument,
-                        new BsonDocumentCodec()), readPreference, getReadConcern()), getDefaultDBObjectCodec());
+        return new CommandResult(executor.execute(
+                new CommandReadOperation<>(getName(), commandDocument,
+                        new BsonDocumentCodec()), readPreference, getReadConcern(), null), getDefaultDBObjectCodec());
     }
 
     OperationExecutor getExecutor() {
         return executor;
     }
+    TimeoutSettings getTimeoutSettings() {
+        return mongo.getTimeoutSettings();
+    }
 
     private BsonDocument wrap(final DBObject document) {
         return new BsonDocumentWrapper<>(document, commandCodec);
@@ -561,6 +572,11 @@ Codec<DBObject> getDefaultDBObjectCodec() {
                 .withUuidRepresentation(getMongoClient().getMongoClientOptions().getUuidRepresentation());
     }
 
+    @Nullable
+    Long getTimeoutMS() {
+        return mongo.getMongoClientOptions().getTimeout();
+    }
+
     private static final Set<String> OBEDIENT_COMMANDS = new HashSet<>();
 
     static {
diff --git a/driver-legacy/src/main/com/mongodb/DBCollection.java b/driver-legacy/src/main/com/mongodb/DBCollection.java
index e71fd8c3aa4..54eb354a877 100644
--- a/driver-legacy/src/main/com/mongodb/DBCollection.java
+++ b/driver-legacy/src/main/com/mongodb/DBCollection.java
@@ -26,6 +26,7 @@
 import com.mongodb.client.model.DBCollectionFindOptions;
 import com.mongodb.client.model.DBCollectionRemoveOptions;
 import com.mongodb.client.model.DBCollectionUpdateOptions;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.bulk.DeleteRequest;
 import com.mongodb.internal.bulk.IndexRequest;
 import com.mongodb.internal.bulk.InsertRequest;
@@ -84,6 +85,7 @@
 import static com.mongodb.MongoNamespace.checkCollectionNameValidity;
 import static com.mongodb.ReadPreference.primary;
 import static com.mongodb.ReadPreference.primaryPreferred;
+import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings;
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.Locks.withLock;
 import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE;
@@ -345,8 +347,8 @@ private Encoder<DBObject> toEncoder(@Nullable final DBEncoder dbEncoder) {
 
     private WriteResult insert(final List<InsertRequest> insertRequestList, final WriteConcern writeConcern,
                                final boolean continueOnError, @Nullable final Boolean bypassDocumentValidation) {
-        return executeWriteOperation(createBulkWriteOperationForInsert(getNamespace(), !continueOnError, writeConcern,
-                retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation));
+        return executeWriteOperation(createBulkWriteOperationForInsert(getNamespace(),
+                !continueOnError, writeConcern, retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation));
     }
 
     WriteResult executeWriteOperation(final LegacyMixedBulkWriteOperation operation) {
@@ -429,8 +431,8 @@ private WriteResult replaceOrInsert(final DBObject obj, final Object id, final W
         UpdateRequest replaceRequest = new UpdateRequest(wrap(filter), wrap(obj, objectCodec),
                                                          Type.REPLACE).upsert(true);
 
-        return executeWriteOperation(createBulkWriteOperationForReplace(getNamespace(), false, writeConcern, retryWrites,
-                singletonList(replaceRequest)));
+        return executeWriteOperation(createBulkWriteOperationForReplace(getNamespace(), false,
+                writeConcern, retryWrites, singletonList(replaceRequest)));
     }
 
     /**
@@ -582,8 +584,10 @@ public WriteResult update(final DBObject query, final DBObject update, final DBC
                                               .collation(options.getCollation())
                                               .arrayFilters(wrapAllowNull(options.getArrayFilters(), options.getEncoder()));
         LegacyMixedBulkWriteOperation operation = (updateType == UPDATE
-                ? createBulkWriteOperationForUpdate(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest))
-                : createBulkWriteOperationForReplace(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest)))
+                ? createBulkWriteOperationForUpdate(getNamespace(), true, writeConcern, retryWrites,
+                singletonList(updateRequest))
+                : createBulkWriteOperationForReplace(getNamespace(), true, writeConcern, retryWrites,
+                singletonList(updateRequest)))
                 .bypassDocumentValidation(options.getBypassDocumentValidation());
         return executeWriteOperation(operation);
     }
@@ -655,8 +659,8 @@ public WriteResult remove(final DBObject query, final DBCollectionRemoveOptions
         WriteConcern optionsWriteConcern = options.getWriteConcern();
         WriteConcern writeConcern = optionsWriteConcern != null ? optionsWriteConcern : getWriteConcern();
         DeleteRequest deleteRequest = new DeleteRequest(wrap(query, options.getEncoder())).collation(options.getCollation());
-        return executeWriteOperation(createBulkWriteOperationForDelete(getNamespace(), false, writeConcern, retryWrites,
-                singletonList(deleteRequest)));
+        return executeWriteOperation(createBulkWriteOperationForDelete(getNamespace(), false,
+                writeConcern, retryWrites, singletonList(deleteRequest)));
     }
 
     /**
@@ -913,12 +917,12 @@ public long getCount(@Nullable final DBObject query) {
      */
     public long getCount(@Nullable final DBObject query, final DBCollectionCountOptions options) {
         notNull("countOptions", options);
-        CountOperation operation = new CountOperation(getNamespace())
-                                       .skip(options.getSkip())
-                                       .limit(options.getLimit())
-                                       .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
-                                       .collation(options.getCollation())
-                                       .retryReads(retryReads);
+        CountOperation operation = new CountOperation(
+                getNamespace())
+                .skip(options.getSkip())
+                .limit(options.getLimit())
+                .collation(options.getCollation())
+                .retryReads(retryReads);
         if (query != null) {
             operation.filter(wrap(query));
         }
@@ -933,8 +937,9 @@ public long getCount(@Nullable final DBObject query, final DBCollectionCountOpti
         }
         ReadPreference optionsReadPreference = options.getReadPreference();
         ReadConcern optionsReadConcern = options.getReadConcern();
-        return executor.execute(operation, optionsReadPreference != null ? optionsReadPreference : getReadPreference(),
-                optionsReadConcern != null ? optionsReadConcern : getReadConcern());
+        return getExecutor(createTimeoutSettings(getTimeoutSettings(), options))
+                .execute(operation, optionsReadPreference != null ? optionsReadPreference : getReadPreference(),
+                        optionsReadConcern != null ? optionsReadConcern : getReadConcern(), null);
     }
 
     /**
@@ -961,8 +966,8 @@ public DBCollection rename(final String newName) {
     public DBCollection rename(final String newName, final boolean dropTarget) {
         try {
             executor.execute(new RenameCollectionOperation(getNamespace(),
-                                                           new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern())
-                                     .dropTarget(dropTarget), getReadConcern());
+                    new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern())
+                    .dropTarget(dropTarget), getReadConcern());
             return getDB().getCollection(newName);
         } catch (MongoWriteConcernException e) {
             throw createWriteConcernException(e);
@@ -1029,9 +1034,9 @@ public List distinct(final String fieldName, final DBObject query, final ReadPre
     public List distinct(final String fieldName, final DBCollectionDistinctOptions options) {
         notNull("fieldName", fieldName);
         return new MongoIterableImpl<BsonValue>(null, executor,
-                                                  options.getReadConcern() != null ? options.getReadConcern() : getReadConcern(),
-                                                  options.getReadPreference() != null ? options.getReadPreference() : getReadPreference(),
-                                                  retryReads) {
+                                                options.getReadConcern() != null ? options.getReadConcern() : getReadConcern(),
+                                                options.getReadPreference() != null ? options.getReadPreference() : getReadPreference(),
+                                                retryReads, DBCollection.this.getTimeoutSettings()) {
             @Override
             public ReadOperation<BatchCursor<BsonValue>> asReadOperation() {
                 return new DistinctOperation<>(getNamespace(), fieldName, new BsonValueCodec())
@@ -1039,6 +1044,12 @@ public ReadOperation<BatchCursor<BsonValue>> asReadOperation() {
                                .collation(options.getCollation())
                                .retryReads(retryReads);
             }
+
+            @Override
+            protected OperationExecutor getExecutor() {
+                return executor;
+            }
+
         }.map(bsonValue -> {
             if (bsonValue == null) {
                 return null;
@@ -1116,16 +1127,15 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) {
         Boolean jsMode = command.getJsMode();
         if (command.getOutputType() == MapReduceCommand.OutputType.INLINE) {
 
-            MapReduceWithInlineResultsOperation<DBObject> operation =
-                    new MapReduceWithInlineResultsOperation<>(getNamespace(), new BsonJavaScript(command.getMap()),
-                            new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec())
-                            .filter(wrapAllowNull(command.getQuery()))
-                            .limit(command.getLimit())
-                            .maxTime(command.getMaxTime(MILLISECONDS), MILLISECONDS)
-                            .jsMode(jsMode != null && jsMode)
-                            .sort(wrapAllowNull(command.getSort()))
-                            .verbose(command.isVerbose())
-                            .collation(command.getCollation());
+            MapReduceWithInlineResultsOperation<DBObject> operation = new MapReduceWithInlineResultsOperation<>(
+                    getNamespace(), new BsonJavaScript(command.getMap()),
+                    new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec())
+                    .filter(wrapAllowNull(command.getQuery()))
+                    .limit(command.getLimit())
+                    .jsMode(jsMode != null && jsMode)
+                    .sort(wrapAllowNull(command.getSort()))
+                    .verbose(command.isVerbose())
+                    .collation(command.getCollation());
 
             if (scope != null) {
                 operation.scope(wrap(new BasicDBObject(scope)));
@@ -1133,7 +1143,9 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) {
             if (command.getFinalize() != null) {
                 operation.finalizeFunction(new BsonJavaScript(command.getFinalize()));
             }
-            MapReduceBatchCursor<DBObject> executionResult = executor.execute(operation, readPreference, getReadConcern());
+            MapReduceBatchCursor<DBObject> executionResult =
+                    getExecutor(createTimeoutSettings(getTimeoutSettings(), command))
+                    .execute(operation, readPreference, getReadConcern(), null);
             return new MapReduceOutput(command.toDBObject(), executionResult);
         } else {
             String action;
@@ -1152,14 +1164,11 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) {
             }
 
             MapReduceToCollectionOperation operation =
-                new MapReduceToCollectionOperation(getNamespace(),
-                                                   new BsonJavaScript(command.getMap()),
-                                                   new BsonJavaScript(command.getReduce()),
-                                                   command.getOutputTarget(),
-                                                   getWriteConcern())
+                new MapReduceToCollectionOperation(
+                        getNamespace(), new BsonJavaScript(command.getMap()), new BsonJavaScript(command.getReduce()),
+                        command.getOutputTarget(), getWriteConcern())
                     .filter(wrapAllowNull(command.getQuery()))
                     .limit(command.getLimit())
-                    .maxTime(command.getMaxTime(MILLISECONDS), MILLISECONDS)
                     .jsMode(jsMode != null && jsMode)
                     .sort(wrapAllowNull(command.getSort()))
                     .verbose(command.isVerbose())
@@ -1225,27 +1234,31 @@ public Cursor aggregate(final List<? extends DBObject> pipeline, final Aggregati
         BsonValue outCollection = stages.get(stages.size() - 1).get("$out");
 
         if (outCollection != null) {
-            AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), stages,
-                    getReadConcern(), getWriteConcern())
-                                                       .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
-                                                       .allowDiskUse(options.getAllowDiskUse())
-                                                       .bypassDocumentValidation(options.getBypassDocumentValidation())
-                                                       .collation(options.getCollation());
+            AggregateToCollectionOperation operation =
+                    new AggregateToCollectionOperation(
+                            getNamespace(), stages, getReadConcern(), getWriteConcern())
+                            .allowDiskUse(options.getAllowDiskUse())
+                            .bypassDocumentValidation(options.getBypassDocumentValidation())
+                            .collation(options.getCollation());
             try {
-                executor.execute(operation, getReadPreference(), getReadConcern());
+                getExecutor(createTimeoutSettings(getTimeoutSettings(), options))
+                        .execute(operation, getReadPreference(), getReadConcern(), null);
                 result = new DBCursor(database.getCollection(outCollection.asString().getValue()), new BasicDBObject(),
                         new DBCollectionFindOptions().readPreference(primary()).collation(options.getCollation()));
             } catch (MongoWriteConcernException e) {
                 throw createWriteConcernException(e);
             }
         } else {
-            AggregateOperation<DBObject> operation = new AggregateOperation<>(getNamespace(), stages, getDefaultDBObjectCodec())
-                    .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
+            AggregateOperation<DBObject> operation = new AggregateOperation<>(
+                    getNamespace(), stages,
+                    getDefaultDBObjectCodec())
                     .allowDiskUse(options.getAllowDiskUse())
                     .batchSize(options.getBatchSize())
                     .collation(options.getCollation())
                     .retryReads(retryReads);
-            BatchCursor<DBObject> cursor1 = executor.execute(operation, readPreference, getReadConcern());
+            BatchCursor<DBObject> cursor1 =
+                    getExecutor(createTimeoutSettings(getTimeoutSettings(), options))
+                            .execute(operation, readPreference, getReadConcern(), null);
             result = new MongoCursorAdapter(new MongoBatchCursorAdapter<>(cursor1));
         }
         return result;
@@ -1262,14 +1275,14 @@ public Cursor aggregate(final List<? extends DBObject> pipeline, final Aggregati
      * @mongodb.server.release 3.6
      */
     public CommandResult explainAggregate(final List<? extends DBObject> pipeline, final AggregationOptions options) {
-        AggregateOperation<BsonDocument> operation = new AggregateOperation<>(getNamespace(), preparePipeline(pipeline),
-                new BsonDocumentCodec())
-                                                         .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
-                                                         .allowDiskUse(options.getAllowDiskUse())
-                                                         .collation(options.getCollation())
-                                                         .retryReads(retryReads);
-        return new CommandResult(executor.execute(operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()),
-                primaryPreferred(), getReadConcern()), getDefaultDBObjectCodec());
+        AggregateOperation<BsonDocument> operation = new AggregateOperation<>(
+                getNamespace(),
+                preparePipeline(pipeline), new BsonDocumentCodec())
+                .allowDiskUse(options.getAllowDiskUse())
+                .collation(options.getCollation())
+                .retryReads(retryReads);
+        return new CommandResult(executor.execute(
+                operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()), primaryPreferred(), getReadConcern(), null), getDefaultDBObjectCodec());
     }
 
     List<BsonDocument> preparePipeline(final List<? extends DBObject> pipeline) {
@@ -1657,7 +1670,6 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod
                         .filter(wrapAllowNull(query))
                         .projection(wrapAllowNull(options.getProjection()))
                         .sort(wrapAllowNull(options.getSort()))
-                        .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                         .collation(options.getCollation());
         } else {
             DBObject update = options.getUpdate();
@@ -1665,33 +1677,31 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod
                 throw new IllegalArgumentException("update can not be null unless it's a remove");
             }
             if (!update.keySet().isEmpty() && update.keySet().iterator().next().charAt(0) == '$') {
-                operation = new FindAndUpdateOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec,
-                        wrap(update))
+                operation = new FindAndUpdateOperation<>(getNamespace(), writeConcern, retryWrites,
+                        objectCodec, wrap(update))
                         .filter(wrap(query))
                         .projection(wrapAllowNull(options.getProjection()))
                         .sort(wrapAllowNull(options.getSort()))
                         .returnOriginal(!options.returnNew())
                         .upsert(options.isUpsert())
-                        .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                         .bypassDocumentValidation(options.getBypassDocumentValidation())
                         .collation(options.getCollation())
                         .arrayFilters(wrapAllowNull(options.getArrayFilters(), (Encoder<DBObject>) null));
             } else {
-                operation = new FindAndReplaceOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec,
-                        wrap(update))
+                operation = new FindAndReplaceOperation<>(getNamespace(), writeConcern, retryWrites,
+                        objectCodec, wrap(update))
                         .filter(wrap(query))
                         .projection(wrapAllowNull(options.getProjection()))
                         .sort(wrapAllowNull(options.getSort()))
                         .returnOriginal(!options.returnNew())
                         .upsert(options.isUpsert())
-                        .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS)
                         .bypassDocumentValidation(options.getBypassDocumentValidation())
                         .collation(options.getCollation());
             }
         }
 
         try {
-            return executor.execute(operation, getReadConcern());
+            return getExecutor(createTimeoutSettings(getTimeoutSettings(), options)).execute(operation, getReadConcern());
         } catch (MongoWriteConcernException e) {
             throw createWriteConcernException(e);
         }
@@ -1791,7 +1801,8 @@ public ReadConcern getReadConcern() {
      */
     public void drop() {
         try {
-            executor.execute(new DropCollectionOperation(getNamespace(), getWriteConcern()), getReadConcern());
+            executor.execute(new DropCollectionOperation(getNamespace(),
+                            getWriteConcern()), getReadConcern());
         } catch (MongoWriteConcernException e) {
             throw createWriteConcernException(e);
         }
@@ -1859,10 +1870,17 @@ public void setDBEncoderFactory(@Nullable final DBEncoderFactory factory) {
      * @mongodb.driver.manual core/indexes/ Indexes
      */
     public List<DBObject> getIndexInfo() {
-        return new MongoIterableImpl<DBObject>(null, executor, ReadConcern.DEFAULT, primary(), retryReads) {
+        return new MongoIterableImpl<DBObject>(null, executor, ReadConcern.DEFAULT, primary(), retryReads,
+                DBCollection.this.getTimeoutSettings()) {
             @Override
             public ReadOperation<BatchCursor<DBObject>> asReadOperation() {
-                return new ListIndexesOperation<>(getNamespace(), getDefaultDBObjectCodec()).retryReads(retryReads);
+                return new ListIndexesOperation<>(getNamespace(), getDefaultDBObjectCodec())
+                        .retryReads(retryReads);
+            }
+
+            @Override
+            public OperationExecutor getExecutor() {
+                return executor;
             }
         }.into(new ArrayList<>());
     }
@@ -1877,7 +1895,8 @@ public ReadOperation<BatchCursor<DBObject>> asReadOperation() {
      */
     public void dropIndex(final DBObject index) {
         try {
-            executor.execute(new DropIndexOperation(getNamespace(), wrap(index), getWriteConcern()), getReadConcern());
+            executor.execute(new DropIndexOperation(getNamespace(), wrap(index),
+                    getWriteConcern()), getReadConcern());
         } catch (MongoWriteConcernException e) {
             throw createWriteConcernException(e);
         }
@@ -1892,7 +1911,8 @@ public void dropIndex(final DBObject index) {
      */
     public void dropIndex(final String indexName) {
         try {
-            executor.execute(new DropIndexOperation(getNamespace(), indexName, getWriteConcern()), getReadConcern());
+            executor.execute(new DropIndexOperation(getNamespace(), indexName,
+                    getWriteConcern()), getReadConcern());
         } catch (MongoWriteConcernException e) {
             throw createWriteConcernException(e);
         }
@@ -2006,9 +2026,9 @@ BulkWriteResult executeBulkWriteOperation(final boolean ordered, final Boolean b
                                               final List<WriteRequest> writeRequests,
                                               final WriteConcern writeConcern) {
         try {
-            return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation(getNamespace(),
-                            translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false)
-                            .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec());
+            return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation(
+                    getNamespace(), translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false)
+                    .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec());
         } catch (MongoBulkWriteException e) {
             throw BulkWriteHelper.translateBulkWriteException(e, MongoClient.getDefaultCodecRegistry().get(DBObject.class));
         }
@@ -2180,6 +2200,10 @@ BsonDocument wrap(final DBObject document, @Nullable final Encoder<DBObject> enc
         }
     }
 
+    TimeoutSettings getTimeoutSettings() {
+        return database.getTimeoutSettings();
+    }
+
     static WriteConcernException createWriteConcernException(final MongoWriteConcernException e) {
         return new WriteConcernException(new BsonDocument("code", new BsonInt32(e.getWriteConcernError().getCode()))
                                                 .append("errmsg", new BsonString(e.getWriteConcernError().getMessage())),
@@ -2187,4 +2211,8 @@ static WriteConcernException createWriteConcernException(final MongoWriteConcern
                                                e.getWriteResult());
     }
 
+    private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) {
+        return executor.withTimeoutSettings(timeoutSettings);
+    }
+
 }
diff --git a/driver-legacy/src/main/com/mongodb/DBCursor.java b/driver-legacy/src/main/com/mongodb/DBCursor.java
index 739901b7c57..9b91bad5984 100644
--- a/driver-legacy/src/main/com/mongodb/DBCursor.java
+++ b/driver-legacy/src/main/com/mongodb/DBCursor.java
@@ -36,6 +36,7 @@
 import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.MongoClient.getDefaultCodecRegistry;
+import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings;
 import static com.mongodb.assertions.Assertions.notNull;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
@@ -370,9 +371,9 @@ public DBCursor maxTime(final long maxTime, final TimeUnit timeUnit) {
      * @mongodb.server.release 3.0
      */
     public DBObject explain() {
-        return executor.execute(getQueryOperation(collection.getObjectCodec())
-                        .asExplainableOperation(null, getDefaultCodecRegistry().get(DBObject.class)),
-                getReadPreference(), getReadConcern());
+        return executor.execute(
+                getQueryOperation(collection.getObjectCodec())
+                                .asExplainableOperation(null, getDefaultCodecRegistry().get(DBObject.class)), getReadPreference(), getReadConcern(), null);
     }
 
     /**
@@ -413,31 +414,29 @@ public DBCursor partial(final boolean partial) {
     }
 
     private FindOperation<DBObject> getQueryOperation(final Decoder<DBObject> decoder) {
-
-        return new FindOperation<>(collection.getNamespace(), decoder)
-                                                .filter(collection.wrapAllowNull(filter))
-                                                .batchSize(findOptions.getBatchSize())
-                                                .skip(findOptions.getSkip())
-                                                .limit(findOptions.getLimit())
-                                                .maxAwaitTime(findOptions.getMaxAwaitTime(MILLISECONDS), MILLISECONDS)
-                                                .maxTime(findOptions.getMaxTime(MILLISECONDS), MILLISECONDS)
-                                                .projection(collection.wrapAllowNull(findOptions.getProjection()))
-                                                .sort(collection.wrapAllowNull(findOptions.getSort()))
-                                                .collation(findOptions.getCollation())
-                                                .comment(findOptions.getComment() != null
-                                                        ? new BsonString(findOptions.getComment()) : null)
-                                                .hint(findOptions.getHint() != null
-                                                        ? collection.wrapAllowNull(findOptions.getHint())
-                                                        : (findOptions.getHintString() != null
-                                                        ? new BsonString(findOptions.getHintString()) : null))
-                                                .min(collection.wrapAllowNull(findOptions.getMin()))
-                                                .max(collection.wrapAllowNull(findOptions.getMax()))
-                                                .cursorType(findOptions.getCursorType())
-                                                .noCursorTimeout(findOptions.isNoCursorTimeout())
-                                                .partial(findOptions.isPartial())
-                                                .returnKey(findOptions.isReturnKey())
-                                                .showRecordId(findOptions.isShowRecordId())
-                                                .retryReads(retryReads);
+        return new FindOperation<>(
+                collection.getNamespace(), decoder)
+                .filter(collection.wrapAllowNull(filter))
+                .batchSize(findOptions.getBatchSize())
+                .skip(findOptions.getSkip())
+                .limit(findOptions.getLimit())
+                .projection(collection.wrapAllowNull(findOptions.getProjection()))
+                .sort(collection.wrapAllowNull(findOptions.getSort()))
+                .collation(findOptions.getCollation())
+                .comment(findOptions.getComment() != null
+                        ? new BsonString(findOptions.getComment()) : null)
+                .hint(findOptions.getHint() != null
+                        ? collection.wrapAllowNull(findOptions.getHint())
+                        : (findOptions.getHintString() != null
+                        ? new BsonString(findOptions.getHintString()) : null))
+                .min(collection.wrapAllowNull(findOptions.getMin()))
+                .max(collection.wrapAllowNull(findOptions.getMax()))
+                .cursorType(findOptions.getCursorType())
+                .noCursorTimeout(findOptions.isNoCursorTimeout())
+                .partial(findOptions.isPartial())
+                .returnKey(findOptions.isReturnKey())
+                .showRecordId(findOptions.isShowRecordId())
+                .retryReads(retryReads);
     }
 
     /**
@@ -787,7 +786,10 @@ public String toString() {
     }
 
     private void initializeCursor(final FindOperation<DBObject> operation) {
-        cursor = new MongoBatchCursorAdapter<>(executor.execute(operation, getReadPreference(), getReadConcern()));
+        cursor =
+                new MongoBatchCursorAdapter<>(executor
+                        .withTimeoutSettings(createTimeoutSettings(collection.getTimeoutSettings(), findOptions))
+                        .execute(operation, getReadPreference(), getReadConcern(), null));
         ServerCursor serverCursor = cursor.getServerCursor();
         if (isCursorFinalizerEnabled() && serverCursor != null) {
             optionalCleaner = DBCursorCleaner.create(collection.getDB().getMongoClient(), collection.getNamespace(),
diff --git a/driver-legacy/src/main/com/mongodb/MongoClient.java b/driver-legacy/src/main/com/mongodb/MongoClient.java
index 94432049351..1e3f0a00c2b 100644
--- a/driver-legacy/src/main/com/mongodb/MongoClient.java
+++ b/driver-legacy/src/main/com/mongodb/MongoClient.java
@@ -28,11 +28,15 @@
 import com.mongodb.connection.ClusterSettings;
 import com.mongodb.event.ClusterListener;
 import com.mongodb.internal.IgnorableRequestContext;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.binding.ConnectionSource;
 import com.mongodb.internal.binding.ReadWriteBinding;
 import com.mongodb.internal.binding.SingleServerBinding;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.NoOpSessionContext;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
 import com.mongodb.internal.session.ServerSessionPool;
@@ -824,6 +828,10 @@ MongoClientImpl getDelegate() {
         return delegate;
     }
 
+    TimeoutSettings getTimeoutSettings() {
+        return delegate.getTimeoutSettings();
+    }
+
     private ExecutorService createCursorCleaningService() {
         ScheduledExecutorService newTimer = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("CleanCursors"));
         newTimer.scheduleAtFixedRate(this::cleanCursors, 1, 1, SECONDS);
@@ -834,7 +842,8 @@ private void cleanCursors() {
         ServerCursorAndNamespace cur;
         while ((cur = orphanedCursors.poll()) != null) {
             ReadWriteBinding binding = new SingleServerBinding(delegate.getCluster(), cur.serverCursor.getAddress(),
-                    options.getServerApi(), IgnorableRequestContext.INSTANCE);
+                    new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+                            new TimeoutContext(getTimeoutSettings()), options.getServerApi()));
             try {
                 ConnectionSource source = binding.getReadConnectionSource();
                 try {
@@ -843,7 +852,7 @@ private void cleanCursors() {
                         BsonDocument killCursorsCommand = new BsonDocument("killCursors", new BsonString(cur.namespace.getCollectionName()))
                                 .append("cursors", new BsonArray(singletonList(new BsonInt64(cur.serverCursor.getId()))));
                         connection.command(cur.namespace.getDatabaseName(), killCursorsCommand, new NoOpFieldNameValidator(),
-                                ReadPreference.primary(), new BsonDocumentCodec(), source);
+                                ReadPreference.primary(), new BsonDocumentCodec(), source.getOperationContext());
                     } finally {
                         connection.release();
                     }
diff --git a/driver-legacy/src/main/com/mongodb/MongoClientOptions.java b/driver-legacy/src/main/com/mongodb/MongoClientOptions.java
index d5fe68e2f70..1f19fba3484 100644
--- a/driver-legacy/src/main/com/mongodb/MongoClientOptions.java
+++ b/driver-legacy/src/main/com/mongodb/MongoClientOptions.java
@@ -16,8 +16,10 @@
 
 package com.mongodb;
 
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.Immutable;
 import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.annotations.Reason;
 import com.mongodb.connection.ClusterConnectionMode;
 import com.mongodb.connection.ConnectionPoolSettings;
 import com.mongodb.event.ClusterListener;
@@ -550,6 +552,38 @@ public ServerApi getServerApi() {
         return wrapped.getServerApi();
     }
 
+    /**
+     * The time limit for the full execution of an operation, in milliseconds.
+     *
+     * <p>If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @return the timeout in milliseconds
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getTimeout() {
+        return wrapped.getTimeout(MILLISECONDS);
+    }
+
     /**
      * Gets the server selector.
      *
@@ -1316,6 +1350,37 @@ public Builder srvServiceName(final String srvServiceName) {
             return this;
         }
 
+        /**
+         * Sets the time limit, in milliseconds, for the full execution of an operation.
+         *
+         * <ul>
+         *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+         *    <ul>
+         *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+         *        available</li>
+         *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+         *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+         *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+         *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+         *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+         *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+         *   </ul>
+         *   </li>
+         *   <li>{@code 0} means infinite timeout.</li>
+         *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+         * </ul>
+         *
+         * @param timeoutMS the timeout in milliseconds
+         * @return this
+         * @since 5.2
+         * @see #getTimeout
+         */
+        @Alpha(Reason.CLIENT)
+        public Builder timeout(final long timeoutMS) {
+            wrapped.timeout(timeoutMS, MILLISECONDS);
+            return this;
+        }
+
         /**
          * Build an instance of MongoClientOptions.
          *
diff --git a/driver-legacy/src/main/com/mongodb/MongoClientURI.java b/driver-legacy/src/main/com/mongodb/MongoClientURI.java
index 43cdccc4f4b..e471bbf1686 100644
--- a/driver-legacy/src/main/com/mongodb/MongoClientURI.java
+++ b/driver-legacy/src/main/com/mongodb/MongoClientURI.java
@@ -99,7 +99,8 @@
  * sslInvalidHostNameAllowed option</li>
  * <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
  * <li>{@code socketTimeoutMS=ms}: How long a receive on a socket can take before timing out.
- * This option is the same as {@link MongoClientOptions#getSocketTimeout()}.</li>
+ * This option is the same as {@link MongoClientOptions#getSocketTimeout()}.
+ * Deprecated, use {@code timeoutMS} instead.</li>
  * <li>{@code maxIdleTimeMS=ms}: Maximum idle time of a pooled connection. A connection that exceeds this limit will be closed</li>
  * <li>{@code maxLifeTimeMS=ms}: Maximum life time of a pooled connection. A connection that exceeds this limit will be closed</li>
  * </ul>
@@ -114,6 +115,8 @@
  * <ul>
  * <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
  * <li>{@code maxConnecting=n}: The maximum number of connections a pool may be establishing concurrently.</li>
+ * <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
+ *      become available. Deprecated, use {@code timeoutMS} instead.</li>
  * </ul>
  *
  * <p>Write concern configuration:</p>
@@ -138,7 +141,7 @@
  * {@code "majority"}</li>
  *      </ul>
  *  </li>
- *  <li>{@code wtimeoutMS=ms}
+ *  <li>{@code wtimeoutMS=ms}. Deprecated, use {@code timeoutMS} instead.
  *      <ul>
  *          <li>The driver adds { wtimeout : ms } to all write commands. Implies {@code safe=true}.</li>
  *          <li>Used in combination with {@code w}</li>
@@ -459,6 +462,10 @@ public MongoClientOptions getOptions() {
         if (srvServiceName != null) {
             builder.srvServiceName(srvServiceName);
         }
+        Long timeout = proxied.getTimeout();
+        if (timeout != null) {
+            builder.timeout(timeout);
+        }
         return builder.build();
     }
 
diff --git a/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java
new file mode 100644
index 00000000000..e47dd7bd32b
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.client.model.DBCollectionCountOptions;
+import com.mongodb.client.model.DBCollectionFindAndModifyOptions;
+import com.mongodb.client.model.DBCollectionFindOptions;
+import com.mongodb.internal.TimeoutSettings;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+final class TimeoutSettingsHelper {
+
+    private TimeoutSettingsHelper() {
+    }
+
+    static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS) {
+        return timeoutSettings.withMaxTimeMS(maxTimeMS);
+    }
+
+    static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS, final long maxAwaitTimeMS) {
+        return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS);
+    }
+
+    static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final AggregationOptions options) {
+        return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS));
+    }
+
+    static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionCountOptions options) {
+        return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS));
+    }
+
+    static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindOptions options) {
+        return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS));
+    }
+
+    static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindAndModifyOptions options) {
+        return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS));
+    }
+
+    @SuppressWarnings("deprecation")
+    static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final MapReduceCommand options) {
+        return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS));
+    }
+
+}
diff --git a/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java b/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java
index f63224cc5f0..cc515f1cb4f 100644
--- a/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java
+++ b/driver-legacy/src/test/functional/com/mongodb/ClientSideEncryptionLegacyTest.java
@@ -47,6 +47,7 @@ protected MongoDatabase getDatabase(final String databaseName) {
 
     @After
     public void cleanUp() {
+        super.cleanUp();
         if (mongoClient != null) {
             mongoClient.close();
         }
diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy
index 6118ce4cdaa..98cb8282c17 100644
--- a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy
+++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy
@@ -271,7 +271,8 @@ class DBCollectionSpecification extends Specification {
         collection.find().iterator().hasNext()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
                 .filter(new BsonDocument())
                 .retryReads(true))
 
@@ -280,7 +281,8 @@ class DBCollectionSpecification extends Specification {
         collection.find().iterator().hasNext()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
                 .filter(new BsonDocument())
                 .retryReads(true))
 
@@ -289,7 +291,8 @@ class DBCollectionSpecification extends Specification {
         collection.find(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)).iterator().hasNext()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
                 .filter(new BsonDocument())
                 .collation(collation)
                 .retryReads(true))
@@ -311,7 +314,8 @@ class DBCollectionSpecification extends Specification {
         collection.findOne()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
                 .filter(new BsonDocument())
                 .limit(-1)
                 .retryReads(true))
@@ -321,7 +325,8 @@ class DBCollectionSpecification extends Specification {
         collection.findOne()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
                 .filter(new BsonDocument())
                 .limit(-1)
                 .retryReads(true))
@@ -331,7 +336,8 @@ class DBCollectionSpecification extends Specification {
         collection.findOne(new BasicDBObject(), new DBCollectionFindOptions().collation(collation))
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
                 .filter(new BsonDocument())
                 .limit(-1)
                 .collation(collation)
@@ -351,8 +357,8 @@ class DBCollectionSpecification extends Specification {
         collection.findAndRemove(query)
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation<DBObject>(collection.getNamespace(),
-                WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument()))
+        expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation<DBObject>(collection.
+                getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument()))
     }
 
     def 'findAndModify should create the correct FindAndUpdateOperation'() {
@@ -383,7 +389,8 @@ class DBCollectionSpecification extends Specification {
         expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation<DBObject>(collection.getNamespace(), WriteConcern.W3,
                 retryWrites, collection.getObjectCodec(), bsonUpdate)
                 .filter(new BsonDocument())
-                .collation(collation).arrayFilters(bsonDocumentWrapperArrayFilters))
+                .collation(collation)
+                .arrayFilters(bsonDocumentWrapperArrayFilters))
 
         where:
         dbObjectArrayFilters <<            [null, [], [new BasicDBObject('i.b', 1)]]
@@ -407,8 +414,8 @@ class DBCollectionSpecification extends Specification {
         collection.findAndModify(query, replace)
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation<DBObject>(collection.getNamespace(),
-                WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace)
+        expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation<DBObject>(collection.
+                getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace)
                 .filter(new BsonDocument()))
 
         when: // With options
@@ -477,8 +484,8 @@ class DBCollectionSpecification extends Specification {
 
         then:
         distinctFieldValues == [1, 2]
-        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec())
-                                                                .filter(new BsonDocument()).retryReads(true))
+        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1',
+                new BsonValueCodec()).filter(new BsonDocument()).retryReads(true))
         executor.getReadConcern() == ReadConcern.DEFAULT
 
         when: // Inherits from DB
@@ -486,7 +493,8 @@ class DBCollectionSpecification extends Specification {
         collection.distinct('field1')
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec())
+        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1',
+                new BsonValueCodec())
                 .filter(new BsonDocument()).retryReads(true))
         executor.getReadConcern() == ReadConcern.MAJORITY
 
@@ -495,8 +503,8 @@ class DBCollectionSpecification extends Specification {
         collection.distinct('field1', new DBCollectionDistinctOptions().collation(collation))
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec())
-                .collation(collation).retryReads(true))
+        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1',
+                new BsonValueCodec()).collation(collation).retryReads(true))
         executor.getReadConcern() == ReadConcern.LOCAL
     }
 
@@ -515,8 +523,8 @@ class DBCollectionSpecification extends Specification {
 
         then:
         expect executor.getReadOperation(), isTheSameAs(
-                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'),
-                        collection.getDefaultDBObjectCodec())
+                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec())
                         .verbose(true)
                         .filter(new BsonDocument()))
         executor.getReadConcern() == ReadConcern.DEFAULT
@@ -527,8 +535,8 @@ class DBCollectionSpecification extends Specification {
 
         then:
         expect executor.getReadOperation(), isTheSameAs(
-                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'),
-                        collection.getDefaultDBObjectCodec())
+                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec())
                         .verbose(true)
                         .filter(new BsonDocument()))
         executor.getReadConcern() == ReadConcern.LOCAL
@@ -542,8 +550,8 @@ class DBCollectionSpecification extends Specification {
 
         then:
         expect executor.getReadOperation(), isTheSameAs(
-                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'),
-                        collection.getDefaultDBObjectCodec())
+                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec())
                         .verbose(true)
                         .filter(new BsonDocument())
                         .collation(collation))
@@ -562,8 +570,8 @@ class DBCollectionSpecification extends Specification {
 
         then:
         expect executor.getWriteOperation(), isTheSameAs(
-                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'),
-                        'myColl', collection.getWriteConcern())
+                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern())
                         .verbose(true)
                         .filter(new BsonDocument())
         )
@@ -573,8 +581,8 @@ class DBCollectionSpecification extends Specification {
 
         then:
         expect executor.getWriteOperation(), isTheSameAs(
-                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'),
-                        'myColl', collection.getWriteConcern())
+                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern())
                         .verbose(true)
                         .filter(new BsonDocument())
         )
@@ -587,8 +595,8 @@ class DBCollectionSpecification extends Specification {
 
         then:
         expect executor.getWriteOperation(), isTheSameAs(
-                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'),
-                        'myColl', collection.getWriteConcern())
+                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern())
                         .verbose(true)
                         .filter(new BsonDocument())
                         .collation(collation)
@@ -611,8 +619,8 @@ class DBCollectionSpecification extends Specification {
         collection.aggregate(pipeline, AggregationOptions.builder().build())
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline,
-                collection.getDefaultDBObjectCodec()).retryReads(true))
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true))
         executor.getReadConcern() == ReadConcern.DEFAULT
 
         when: // Inherits from DB
@@ -620,8 +628,8 @@ class DBCollectionSpecification extends Specification {
         collection.aggregate(pipeline, AggregationOptions.builder().build())
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline,
-                collection.getDefaultDBObjectCodec()).retryReads(true))
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true))
         executor.getReadConcern() == ReadConcern.MAJORITY
 
         when:
@@ -629,8 +637,8 @@ class DBCollectionSpecification extends Specification {
         collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build())
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline,
-                collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true))
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true))
         executor.getReadConcern() == ReadConcern.LOCAL
     }
 
@@ -678,8 +686,8 @@ class DBCollectionSpecification extends Specification {
         collection.explainAggregate(pipeline, options)
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline,
-                collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
                 .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()))
 
         when: // Inherits from DB
@@ -687,8 +695,8 @@ class DBCollectionSpecification extends Specification {
         collection.explainAggregate(pipeline, options)
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline,
-                collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
                 .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()))
 
         when:
@@ -696,8 +704,8 @@ class DBCollectionSpecification extends Specification {
         collection.explainAggregate(pipeline, options)
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline,
-                collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
                 .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()))
     }
 
@@ -717,8 +725,8 @@ class DBCollectionSpecification extends Specification {
         collection.update(BasicDBObject.parse(query), BasicDBObject.parse(update))
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true,
-                WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest)))
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(),
+                true, WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest)))
 
         when: // Inherits from DB
         db.setWriteConcern(WriteConcern.W3)
@@ -726,8 +734,8 @@ class DBCollectionSpecification extends Specification {
 
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true,
-                WriteConcern.W3, retryWrites, asList(updateRequest)))
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(),
+                true, WriteConcern.W3, retryWrites, asList(updateRequest)))
 
         when:
         collection.setWriteConcern(WriteConcern.W1)
@@ -736,8 +744,8 @@ class DBCollectionSpecification extends Specification {
                 new DBCollectionUpdateOptions().collation(collation).arrayFilters(dbObjectArrayFilters))
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true,
-                WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters))))
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(),
+                true, WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters))))
 
         where:
         dbObjectArrayFilters <<            [null, [], [new BasicDBObject('i.b', 1)]]
@@ -759,16 +767,16 @@ class DBCollectionSpecification extends Specification {
         collection.remove(BasicDBObject.parse(query))
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false,
-                WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest)))
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(),
+                false, WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest)))
 
         when: // Inherits from DB
         db.setWriteConcern(WriteConcern.W3)
         collection.remove(BasicDBObject.parse(query))
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false,
-                WriteConcern.W3, retryWrites, asList(deleteRequest)))
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(),
+                false, WriteConcern.W3, retryWrites, asList(deleteRequest)))
 
         when:
         collection.setWriteConcern(WriteConcern.W1)
@@ -776,8 +784,8 @@ class DBCollectionSpecification extends Specification {
         collection.remove(BasicDBObject.parse(query), new DBCollectionRemoveOptions().collation(collation))
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false,
-                WriteConcern.W1, retryWrites, asList(deleteRequest)))
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(),
+                false, WriteConcern.W1, retryWrites, asList(deleteRequest)))
     }
 
     def 'should create the correct MixedBulkWriteOperation'() {
@@ -808,7 +816,8 @@ class DBCollectionSpecification extends Specification {
         bulk().execute()
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered,
+        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(),
+                writeRequests, ordered,
                 WriteConcern.ACKNOWLEDGED, false))
 
         when: // Inherits from DB
@@ -816,16 +825,16 @@ class DBCollectionSpecification extends Specification {
         bulk().execute()
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered,
-                WriteConcern.W3, false))
+        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(),
+                writeRequests, ordered, WriteConcern.W3, false))
 
         when:
         collection.setWriteConcern(WriteConcern.W1)
         bulk().execute()
 
         then:
-        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered,
-                WriteConcern.W1, false))
+        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(),
+                writeRequests, ordered, WriteConcern.W1, false))
 
         where:
         ordered << [true, false, true]
diff --git a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy
index 227126b1160..85fb3ad867e 100644
--- a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy
+++ b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy
@@ -167,7 +167,8 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec
     def 'should return correct result for replace'() {
         given:
         def replacement = new UpdateRequest(new BsonDocument(), new BsonDocument('_id', new BsonInt32(1)), REPLACE)
-        def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement))
+        def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED,
+                false, asList(replacement))
 
         when:
         def result = execute(operation)
@@ -182,11 +183,13 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec
     def 'should replace a single document'() {
         given:
         def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))
-        createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)).execute(getBinding())
+        createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert))
+                .execute(getBinding())
 
         def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
                 new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE)
-        def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement))
+        def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED,
+                false, asList(replacement))
 
         when:
         def result = execute(operation)
@@ -205,7 +208,8 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec
         def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
                 new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE)
                 .upsert(true)
-        def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement))
+        def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED,
+                false, asList(replacement))
 
         when:
         execute(operation)
@@ -216,9 +220,9 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec
 
     def 'should update nothing if no documents match'() {
         given:
-        def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false,
-                asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)),
-                        new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false)))
+        def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED,
+                false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)),
+                new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false)))
 
         when:
         WriteConcernResult result = execute(operation)
diff --git a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy
index 84a755b5353..59dceb6478a 100644
--- a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy
+++ b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy
@@ -122,10 +122,11 @@ class DBCursorSpecification extends Specification {
         cursor.toArray()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
-                                                                .filter(new BsonDocument())
-                                                                .projection(new BsonDocument())
-                                                                .retryReads(true))
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
+                .filter(new BsonDocument())
+                .projection(new BsonDocument())
+                .retryReads(true))
     }
 
 
@@ -140,11 +141,13 @@ class DBCursorSpecification extends Specification {
         cursor.one()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
-                                                                .limit(-1)
-                                                                .filter(new BsonDocument())
-                                                                .projection(new BsonDocument())
-                                                                .retryReads(true))
+        expect executor.getReadOperation(), isTheSameAs(
+                new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+                        .limit(-1)
+                        .filter(new BsonDocument())
+                        .projection(new BsonDocument())
+                        .retryReads(true)
+        )
     }
 
     def 'DBCursor methods should be used to create the expected operation'() {
@@ -167,7 +170,7 @@ class DBCursorSpecification extends Specification {
                 .batchSize(1)
                 .cursorType(cursorType)
                 .limit(1)
-                .maxTime(1, TimeUnit.MILLISECONDS)
+                .maxTime(100, TimeUnit.MILLISECONDS)
                 .noCursorTimeout(true)
                 .partial(true)
                 .skip(1)
@@ -177,13 +180,13 @@ class DBCursorSpecification extends Specification {
         cursor.toArray()
 
         then:
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec())
+        expect executor.getReadOperation(), isTheSameAs(
+                new FindOperation(collection.getNamespace(), collection.getObjectCodec())
                 .batchSize(1)
                 .collation(collation)
                 .cursorType(cursorType)
                 .filter(bsonFilter)
                 .limit(1)
-                .maxTime(1, TimeUnit.MILLISECONDS)
                 .noCursorTimeout(true)
                 .partial(true)
                 .skip(1)
@@ -221,8 +224,8 @@ class DBCursorSpecification extends Specification {
                 .collation(collation)
                 .cursorType(cursorType)
                 .limit(1)
-                .maxAwaitTime(1, TimeUnit.MILLISECONDS)
-                .maxTime(1, TimeUnit.MILLISECONDS)
+                .maxAwaitTime(1001, TimeUnit.MILLISECONDS)
+                .maxTime(101, TimeUnit.MILLISECONDS)
                 .noCursorTimeout(true)
                 .partial(true)
                 .projection(projection)
@@ -249,8 +252,6 @@ class DBCursorSpecification extends Specification {
                 .cursorType(cursorType)
                 .filter(bsonFilter)
                 .limit(1)
-                .maxAwaitTime(1, TimeUnit.MILLISECONDS)
-                .maxTime(1, TimeUnit.MILLISECONDS)
                 .noCursorTimeout(true)
                 .partial(true)
                 .projection(bsonProjection)
diff --git a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy
index fe61ba00a3d..5f0c81f28cc 100644
--- a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy
+++ b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy
@@ -36,6 +36,7 @@ import spock.lang.Specification
 
 import static Fixture.getMongoClient
 import static com.mongodb.ClusterFixture.serverVersionLessThan
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry
 import static org.junit.Assume.assumeTrue
@@ -76,6 +77,7 @@ class DBSpecification extends Specification {
         def mongo = Stub(MongoClient)
         mongo.mongoClientOptions >> MongoClientOptions.builder().build()
         mongo.codecRegistry >> getDefaultCodecRegistry()
+        mongo.timeoutSettings >> TIMEOUT_SETTINGS
         def executor = new TestOperationExecutor([1L, 2L, 3L])
         def db = new DB(mongo, 'test', executor)
         db.setReadConcern(ReadConcern.MAJORITY)
@@ -134,7 +136,8 @@ class DBSpecification extends Specification {
         operation = executor.getWriteOperation() as CreateCollectionOperation
 
         then:
-        expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()).collation(collation))
+        expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern())
+                .collation(collation))
         executor.getReadConcern() == ReadConcern.MAJORITY
     }
 
@@ -144,6 +147,7 @@ class DBSpecification extends Specification {
             getCodecRegistry() >> MongoClient.defaultCodecRegistry
         }
         mongo.mongoClientOptions >> MongoClientOptions.builder().build()
+        mongo.timeoutSettings >> TIMEOUT_SETTINGS
         def executor = new TestOperationExecutor([1L, 2L, 3L])
 
         def databaseName = 'test'
@@ -180,6 +184,7 @@ class DBSpecification extends Specification {
         given:
         def mongo = Stub(MongoClient)
         mongo.mongoClientOptions >> MongoClientOptions.builder().build()
+        mongo.timeoutSettings >> TIMEOUT_SETTINGS
         def executor = new TestOperationExecutor([Stub(BatchCursor), Stub(BatchCursor)])
 
         def databaseName = 'test'
@@ -191,7 +196,8 @@ class DBSpecification extends Specification {
         def operation = executor.getReadOperation() as ListCollectionsOperation
 
         then:
-        expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry()))
+        expect operation, isTheSameAs(new ListCollectionsOperation(databaseName,
+                new DBObjectCodec(getDefaultCodecRegistry()))
                 .nameOnly(true))
 
         when:
@@ -199,7 +205,8 @@ class DBSpecification extends Specification {
         operation = executor.getReadOperation() as ListCollectionsOperation
 
         then:
-        expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry()))
+        expect operation, isTheSameAs(new ListCollectionsOperation(databaseName,
+                new DBObjectCodec(getDefaultCodecRegistry()))
                 .nameOnly(true))
     }
 
diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy
index c36eacd2198..ae1d332674c 100644
--- a/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy
+++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy
@@ -51,6 +51,7 @@ class MongoClientOptionsSpecification extends Specification {
         options.getMinConnectionsPerHost() == 0
         options.getConnectionsPerHost() == 100
         options.getMaxConnecting() == 2
+        options.getTimeout() == null
         options.getConnectTimeout() == 10000
         options.getReadPreference() == ReadPreference.primary()
         options.getServerSelector() == null
@@ -119,6 +120,7 @@ class MongoClientOptionsSpecification extends Specification {
                                         .readConcern(ReadConcern.MAJORITY)
                                         .minConnectionsPerHost(30)
                                         .connectionsPerHost(500)
+                                        .timeout(10_000)
                                         .connectTimeout(100)
                                         .socketTimeout(700)
                                         .serverSelector(serverSelector)
@@ -161,6 +163,7 @@ class MongoClientOptionsSpecification extends Specification {
         options.getRetryWrites()
         !options.getRetryReads()
         options.getServerSelectionTimeout() == 150
+        options.getTimeout() == 10_000
         options.getMaxWaitTime() == 200
         options.getMaxConnectionIdleTime() == 300
         options.getMaxConnectionLifeTime() == 400
@@ -211,6 +214,7 @@ class MongoClientOptionsSpecification extends Specification {
         settings.readConcern == ReadConcern.MAJORITY
         settings.uuidRepresentation == UuidRepresentation.C_SHARP_LEGACY
         settings.serverApi == serverApi
+        settings.getTimeout(TimeUnit.MILLISECONDS) == 10_000
 
         when:
         def optionsFromSettings = MongoClientOptions.builder(settings).build()
@@ -224,6 +228,7 @@ class MongoClientOptionsSpecification extends Specification {
         optionsFromSettings.getRetryWrites()
         !optionsFromSettings.getRetryReads()
         optionsFromSettings.getServerSelectionTimeout() == 150
+        optionsFromSettings.getTimeout() == 10_000
         optionsFromSettings.getMaxWaitTime() == 200
         optionsFromSettings.getMaxConnectionIdleTime() == 300
         optionsFromSettings.getMaxConnectionLifeTime() == 400
@@ -317,6 +322,7 @@ class MongoClientOptionsSpecification extends Specification {
                 .writeConcern(WriteConcern.JOURNALED)
                 .minConnectionsPerHost(30)
                 .connectionsPerHost(500)
+                .timeout(10_000)
                 .connectTimeout(100)
                 .socketTimeout(700)
                 .serverSelectionTimeout(150)
@@ -616,6 +622,7 @@ class MongoClientOptionsSpecification extends Specification {
                 .uuidRepresentation(UuidRepresentation.STANDARD)
                 .minConnectionsPerHost(30)
                 .connectionsPerHost(500)
+                .timeout(10_000)
                 .connectTimeout(100)
                 .socketTimeout(700)
                 .serverSelectionTimeout(150)
diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy
index c20fbabfb58..c007e504ae6 100644
--- a/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy
+++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy
@@ -30,6 +30,7 @@ import org.bson.codecs.configuration.CodecRegistry
 import org.bson.json.JsonObject
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry
 import static com.mongodb.MongoCredential.createMongoX509Credential
@@ -340,7 +341,7 @@ class MongoClientSpecification extends Specification {
         then:
         expect database, isTheSameAs(new MongoDatabaseImpl('name', client.getCodecRegistry(), secondary(),
                 WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, STANDARD, null,
-                client.getOperationExecutor()))
+                TIMEOUT_SETTINGS.withMaxWaitTimeMS(120_000), client.getOperationExecutor()))
     }
 
     def 'should create registry reflecting UuidRepresentation'() {
diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy
index b187df8dab8..241ac958c8a 100644
--- a/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy
+++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy
@@ -132,7 +132,8 @@ class MongoClientURISpecification extends Specification {
                 + 'retryWrites=true&'
                 + 'retryReads=true&'
                 + 'uuidRepresentation=csharpLegacy&'
-                + 'appName=app1')
+                + 'appName=app1&'
+                + 'timeoutMS=10000')
 
         when:
         def options = uri.getOptions()
@@ -146,6 +147,7 @@ class MongoClientURISpecification extends Specification {
         options.getMaxConnectionIdleTime() == 200
         options.getMaxConnectionLifeTime() == 300
         options.getMaxConnecting() == 1
+        options.getTimeout() == 10_000
         options.getSocketTimeout() == 5500
         options.getConnectTimeout() == 2500
         options.getRequiredReplicaSetName() == 'test'
@@ -167,6 +169,7 @@ class MongoClientURISpecification extends Specification {
         then:
         options.getConnectionsPerHost() == 100
         options.getMaxConnecting() == 2
+        options.getTimeout() == null
         options.getMaxWaitTime() == 120000
         options.getConnectTimeout() == 10000
         options.getSocketTimeout() == 0
@@ -188,6 +191,7 @@ class MongoClientURISpecification extends Specification {
                 .writeConcern(WriteConcern.JOURNALED)
                 .minConnectionsPerHost(30)
                 .connectionsPerHost(500)
+                .timeout(10_000)
                 .connectTimeout(100)
                 .socketTimeout(700)
                 .serverSelectionTimeout(150)
@@ -216,6 +220,7 @@ class MongoClientURISpecification extends Specification {
         options.getWriteConcern() == WriteConcern.JOURNALED
         options.getRetryWrites()
         options.getRetryReads()
+        options.getTimeout() == 10_000
         options.getServerSelectionTimeout() == 150
         options.getMaxWaitTime() == 200
         options.getMaxConnectionIdleTime() == 300
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java
index a879094fa37..0642d0fc8f9 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java
@@ -17,6 +17,9 @@
 package com.mongodb.reactivestreams.client;
 
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
@@ -172,6 +175,27 @@ public interface AggregatePublisher<TResult> extends Publisher<TResult> {
      */
     AggregatePublisher<TResult> batchSize(int batchSize);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     *     If the {@code timeout} is set then:
+     *     <ul>
+     *      <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *      <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it's an error
+     *      to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     </ul>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    AggregatePublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Helper to return a publisher limited to the first result.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java
index bf47ed7d9a2..2b695621dc3 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.reactivestreams.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
@@ -94,6 +97,20 @@ public interface DistinctPublisher<TResult> extends Publisher<TResult> {
      */
     DistinctPublisher<TResult> comment(@Nullable BsonValue comment);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    DistinctPublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Helper to return a publisher limited to the first result.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java
index 8a485facaf5..1128c87bd02 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java
@@ -18,6 +18,9 @@
 
 import com.mongodb.CursorType;
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.client.model.Projections;
 import com.mongodb.lang.Nullable;
@@ -269,6 +272,27 @@ public interface FindPublisher<TResult> extends Publisher<TResult> {
      */
     FindPublisher<TResult> allowDiskUse(@Nullable Boolean allowDiskUse);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     *     If the {@code timeout} is set then:
+     *     <ul>
+     *      <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *      <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it's an error
+     *      to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     </ul>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    FindPublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Explain the execution plan for this operation with the server's default verbosity level
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java
index dadef9dfab9..50808928172 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.reactivestreams.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 import org.bson.conversions.Bson;
@@ -84,6 +87,20 @@ public interface ListCollectionsPublisher<TResult> extends Publisher<TResult> {
      */
     ListCollectionsPublisher<TResult> comment(@Nullable BsonValue comment);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListCollectionsPublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Helper to return a publisher limited to the first result.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java
index 6f6f11e5296..0dea2b0e219 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java
@@ -17,6 +17,9 @@
 
 package com.mongodb.reactivestreams.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 import org.bson.conversions.Bson;
@@ -107,6 +110,20 @@ public interface ListDatabasesPublisher<TResult> extends Publisher<TResult> {
      */
     ListDatabasesPublisher<TResult> comment(@Nullable BsonValue comment);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListDatabasesPublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Helper to return a publisher limited to the first result.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java
index 9ee05851576..f2abb11a9bb 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.reactivestreams.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 import org.reactivestreams.Publisher;
@@ -73,6 +76,20 @@ public interface ListIndexesPublisher<TResult> extends Publisher<TResult> {
      */
     ListIndexesPublisher<TResult> comment(@Nullable BsonValue comment);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListIndexesPublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Helper to return a publisher limited to the first result.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java
index 2eacc6922bb..f7d0eb74f6c 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java
@@ -17,7 +17,10 @@
 package com.mongodb.reactivestreams.client;
 
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.Evolving;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
@@ -98,6 +101,20 @@ public interface ListSearchIndexesPublisher<TResult> extends Publisher<TResult>
      */
     ListSearchIndexesPublisher<TResult> comment(@Nullable BsonValue comment);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListSearchIndexesPublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Helper to return a publisher limited to the first result.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java
index e57a8fce007..2add0f33691 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java
@@ -17,6 +17,9 @@
 package com.mongodb.reactivestreams.client;
 
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.conversions.Bson;
@@ -181,6 +184,27 @@ public interface MapReducePublisher<TResult> extends Publisher<TResult> {
      */
     MapReducePublisher<TResult> batchSize(int batchSize);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     *     If the {@code timeout} is set then:
+     *     <ul>
+     *      <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *      <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *      to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     </ul>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    MapReducePublisher<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Helper to return a publisher limited to the first result.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java
index ed29939fbdc..061fd3c8bed 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java
@@ -16,17 +16,12 @@
 
 package com.mongodb.reactivestreams.client;
 
-import com.mongodb.ClientSessionOptions;
 import com.mongodb.annotations.Immutable;
 import com.mongodb.connection.ClusterDescription;
 import com.mongodb.connection.ClusterSettings;
 import com.mongodb.event.ClusterListener;
-import org.bson.Document;
-import org.bson.conversions.Bson;
-import org.reactivestreams.Publisher;
 
 import java.io.Closeable;
-import java.util.List;
 
 /**
  * A client-side representation of a MongoDB cluster.  Instances can represent either a standalone MongoDB instance, a replica set,
@@ -39,14 +34,7 @@
  * @since 1.0
  */
 @Immutable
-public interface MongoClient extends Closeable {
-    /**
-     * Gets the database with the given name.
-     *
-     * @param name the name of the database
-     * @return the database
-     */
-    MongoDatabase getDatabase(String name);
+public interface MongoClient extends MongoCluster, Closeable {
 
     /**
      * Close the client, which will close all underlying cached resources, including, for example,
@@ -54,179 +42,6 @@ public interface MongoClient extends Closeable {
      */
     void close();
 
-    /**
-     * Get a list of the database names
-     *
-     * @mongodb.driver.manual reference/commands/listDatabases List Databases
-     * @return an iterable containing all the names of all the databases
-     */
-    Publisher<String> listDatabaseNames();
-
-    /**
-     * Get a list of the database names
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @mongodb.driver.manual reference/commands/listDatabases List Databases
-     * @return an iterable containing all the names of all the databases
-     *
-     * @mongodb.server.release 3.6
-     * @since 1.7
-     */
-    Publisher<String> listDatabaseNames(ClientSession clientSession);
-
-    /**
-     * Gets the list of databases
-     *
-     * @return the fluent list databases interface
-     */
-    ListDatabasesPublisher<Document> listDatabases();
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clazz the class to cast the database documents to
-     * @param <TResult>   the type of the class to use instead of {@code Document}.
-     * @return the fluent list databases interface
-     */
-    <TResult> ListDatabasesPublisher<TResult> listDatabases(Class<TResult> clazz);
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the fluent list databases interface
-     * @mongodb.server.release 3.6
-     * @since 1.7
-     */
-    ListDatabasesPublisher<Document> listDatabases(ClientSession clientSession);
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param clazz the class to cast the database documents to
-     * @param <TResult>   the type of the class to use instead of {@code Document}.
-     * @return the fluent list databases interface
-     * @mongodb.server.release 3.6
-     * @since 1.7
-     */
-    <TResult> ListDatabasesPublisher<TResult> listDatabases(ClientSession clientSession, Class<TResult> clazz);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     */
-    ChangeStreamPublisher<Document> watch();
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     */
-    <TResult> ChangeStreamPublisher<TResult> watch(Class<TResult> resultClass);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param pipeline the aggregation pipeline to apply to the change stream.
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     */
-    ChangeStreamPublisher<Document> watch(List<? extends Bson> pipeline);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param pipeline    the aggregation pipeline to apply to the change stream
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     */
-    <TResult> ChangeStreamPublisher<TResult> watch(List<? extends Bson> pipeline, Class<TResult> resultClass);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the change stream iterable
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    ChangeStreamPublisher<Document> watch(ClientSession clientSession);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    <TResult> ChangeStreamPublisher<TResult> watch(ClientSession clientSession, Class<TResult> resultClass);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream.
-     * @return the change stream iterable
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    ChangeStreamPublisher<Document> watch(ClientSession clientSession, List<? extends Bson> pipeline);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline    the aggregation pipeline to apply to the change stream
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @since 1.9
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    <TResult> ChangeStreamPublisher<TResult> watch(ClientSession clientSession, List<? extends Bson> pipeline, Class<TResult> resultClass);
-
-    /**
-     * Creates a client session.
-     *
-     * @return a publisher for the client session.
-     * @mongodb.server.release 3.6
-     * @since 1.9
-     */
-    Publisher<ClientSession> startSession();
-
-    /**
-     * Creates a client session.
-     *
-     * @param options the options for the client session
-     * @return a publisher for the client session.
-     * @mongodb.server.release 3.6
-     * @since 1.7
-     */
-    Publisher<ClientSession> startSession(ClientSessionOptions options);
-
     /**
      * Gets the current cluster description.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java
index 28bcc068805..a2f5fb9d125 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java
@@ -21,6 +21,7 @@
 import com.mongodb.MongoClientSettings;
 import com.mongodb.MongoDriverInformation;
 import com.mongodb.connection.TransportSettings;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.connection.AsynchronousSocketChannelStreamFactoryFactory;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.DefaultClusterFactory;
@@ -148,11 +149,11 @@ private static Cluster createCluster(final MongoClientSettings settings,
                                          final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory) {
         notNull("settings", settings);
         return new DefaultClusterFactory().createCluster(settings.getClusterSettings(), settings.getServerSettings(),
-                settings.getConnectionPoolSettings(),
-                InternalConnectionPoolSettings.builder().prestartAsyncWorkManager(true).build(),
-                streamFactory, heartbeatStreamFactory, settings.getCredential(), settings.getLoggerSettings(),
-                getCommandListener(settings.getCommandListeners()), settings.getApplicationName(), mongoDriverInformation,
-                settings.getCompressorList(), settings.getServerApi(), settings.getDnsClient());
+                settings.getConnectionPoolSettings(), InternalConnectionPoolSettings.builder().prestartAsyncWorkManager(true).build(),
+                TimeoutSettings.create(settings), streamFactory, TimeoutSettings.createHeartbeatSettings(settings), heartbeatStreamFactory,
+                settings.getCredential(), settings.getLoggerSettings(), getCommandListener(settings.getCommandListeners()),
+                settings.getApplicationName(), mongoDriverInformation, settings.getCompressorList(), settings.getServerApi(),
+                settings.getDnsClient());
     }
 
     private static MongoDriverInformation wrapMongoDriverInformation(@Nullable final MongoDriverInformation mongoDriverInformation) {
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java
new file mode 100644
index 00000000000..ef7c0ddb79d
--- /dev/null
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.MongoNamespace;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Immutable;
+import com.mongodb.annotations.Reason;
+import com.mongodb.lang.Nullable;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+import org.reactivestreams.Publisher;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * The client-side representation of operations on a MongoDB cluster.
+ *
+ * <p>
+ * The originating {@link MongoClient} is responsible for the closing of resources.
+ * If the originating {@link MongoClient} is closed, then any cluster operations will fail.
+ * </p>
+ *
+ * @see MongoClient
+ * @since 5.2
+ */
+@Immutable
+public interface MongoCluster {
+
+    /**
+     * Get the codec registry for the MongoCluster.
+     *
+     * @return the {@link CodecRegistry}
+     * @since 5.2
+     */
+    CodecRegistry getCodecRegistry();
+
+    /**
+     * Get the read preference for the MongoCluster.
+     *
+     * @return the {@link ReadPreference}
+     * @since 5.2
+     */
+    ReadPreference getReadPreference();
+
+    /**
+     * Get the write concern for the MongoCluster.
+     *
+     * @return the {@link WriteConcern}
+     * @since 5.2
+     */
+    WriteConcern getWriteConcern();
+
+    /**
+     * Get the read concern for the MongoCluster.
+     *
+     * @return the {@link ReadConcern}
+     * @since 5.2
+     * @mongodb.driver.manual reference/readConcern/ Read Concern
+     */
+    ReadConcern getReadConcern();
+
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
+    /**
+     * Create a new MongoCluster instance with a different codec registry.
+     *
+     * <p>The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+     * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+     * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+     * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.</p>
+     *
+     * @param codecRegistry the new {@link CodecRegistry} for the database
+     * @return a new MongoCluster instance with the different codec registry
+     * @see org.bson.codecs.configuration.CodecRegistries
+     * @since 5.2
+     */
+    MongoCluster withCodecRegistry(CodecRegistry codecRegistry);
+
+    /**
+     * Create a new MongoCluster instance with a different read preference.
+     *
+     * @param readPreference the new {@link ReadPreference} for the database
+     * @return a new MongoCluster instance with the different readPreference
+     * @since 5.2
+     */
+    MongoCluster withReadPreference(ReadPreference readPreference);
+
+    /**
+     * Create a new MongoCluster instance with a different write concern.
+     *
+     * @param writeConcern the new {@link WriteConcern} for the database
+     * @return a new MongoCluster instance with the different writeConcern
+     * @since 5.2
+     */
+    MongoCluster withWriteConcern(WriteConcern writeConcern);
+
+    /**
+     * Create a new MongoCluster instance with a different read concern.
+     *
+     * @param readConcern the new {@link ReadConcern} for the database
+     * @return a new MongoCluster instance with the different ReadConcern
+     * @since 5.2
+     * @mongodb.driver.manual reference/readConcern/ Read Concern
+     */
+    MongoCluster withReadConcern(ReadConcern readConcern);
+
+    /**
+     * Create a new MongoCluster instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new MongoCluster instance with the set time limit for the full execution of an operation.
+     * @since 5.2
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    MongoCluster withTimeout(long timeout, TimeUnit timeUnit);
+
+    /**
+     * Gets a {@link MongoDatabase} instance for the given database name.
+     *
+     * @param databaseName the name of the database to retrieve
+     * @return a {@code MongoDatabase} representing the specified database
+     * @throws IllegalArgumentException if databaseName is invalid
+     * @see MongoNamespace#checkDatabaseNameValidity(String)
+     */
+    MongoDatabase getDatabase(String databaseName);
+
+    /**
+     * Creates a client session with default options.
+     *
+     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
+     *
+     * @return the client session
+     * @mongodb.server.release 3.6
+     */
+    Publisher<ClientSession> startSession();
+
+    /**
+     * Creates a client session.
+     *
+     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
+     *
+     * @param options  the options for the client session
+     * @return the client session
+     * @mongodb.server.release 3.6
+     */
+    Publisher<ClientSession> startSession(ClientSessionOptions options);
+
+    /**
+     * Get a list of the database names
+     *
+     * @return an iterable containing all the names of all the databases
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     */
+    Publisher<String> listDatabaseNames();
+
+    /**
+     * Get a list of the database names
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return an iterable containing all the names of all the databases
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     * @mongodb.server.release 3.6
+     */
+    Publisher<String> listDatabaseNames(ClientSession clientSession);
+
+    /**
+     * Gets the list of databases
+     *
+     * @return the list databases iterable interface
+     */
+    ListDatabasesPublisher<Document> listDatabases();
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list databases iterable interface
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     * @mongodb.server.release 3.6
+     */
+    ListDatabasesPublisher<Document> listDatabases(ClientSession clientSession);
+
+    /**
+     * Gets the list of databases
+     *
+     * @param resultClass the class to cast the database documents to
+     * @param <TResult>   the type of the class to use instead of {@code Document}.
+     * @return the list databases iterable interface
+     */
+    <TResult> ListDatabasesPublisher<TResult> listDatabases(Class<TResult> resultClass);
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the class to cast the database documents to
+     * @param <TResult>   the type of the class to use instead of {@code Document}.
+     * @return the list databases iterable interface
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     * @mongodb.server.release 3.6
+     */
+    <TResult> ListDatabasesPublisher<TResult> listDatabases(ClientSession clientSession, Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    ChangeStreamPublisher<Document> watch();
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    <TResult> ChangeStreamPublisher<TResult> watch(Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param pipeline the aggregation pipeline to apply to the change stream.
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    ChangeStreamPublisher<Document> watch(List<? extends Bson> pipeline);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param pipeline    the aggregation pipeline to apply to the change stream
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    <TResult> ChangeStreamPublisher<TResult> watch(List<? extends Bson> pipeline, Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    ChangeStreamPublisher<Document> watch(ClientSession clientSession);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    <TResult> ChangeStreamPublisher<TResult> watch(ClientSession clientSession, Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream.
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    ChangeStreamPublisher<Document> watch(ClientSession clientSession, List<? extends Bson> pipeline);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline    the aggregation pipeline to apply to the change stream
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    <TResult> ChangeStreamPublisher<TResult> watch(ClientSession clientSession, List<? extends Bson> pipeline, Class<TResult> resultClass);
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java
index 635547ef7f7..4e17208b342 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java
@@ -20,6 +20,8 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.bulk.BulkWriteResult;
 import com.mongodb.client.model.BulkWriteOptions;
@@ -45,12 +47,14 @@
 import com.mongodb.client.result.InsertManyResult;
 import com.mongodb.client.result.InsertOneResult;
 import com.mongodb.client.result.UpdateResult;
+import com.mongodb.lang.Nullable;
 import org.bson.Document;
 import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
 import org.reactivestreams.Publisher;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 /**
  * The MongoCollection interface.
@@ -107,6 +111,37 @@ public interface MongoCollection<TDocument> {
      */
     ReadConcern getReadConcern();
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available.</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
     /**
      * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into..
      *
@@ -156,6 +191,23 @@ public interface MongoCollection<TDocument> {
      */
     MongoCollection<TDocument> withReadConcern(ReadConcern readConcern);
 
+    /**
+     * Create a new MongoCollection instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new MongoCollection instance with the set time limit for the full execution of an operation
+     * @since 5.2
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    MongoCollection<TDocument> withTimeout(long timeout, TimeUnit timeUnit);
+
     /**
      * Gets an estimate of the count of documents in a collection using collection metadata.
      *
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java
index e17f2d05259..b479ece08c5 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java
@@ -19,15 +19,19 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.CreateViewOptions;
+import com.mongodb.lang.Nullable;
 import org.bson.Document;
 import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
 import org.reactivestreams.Publisher;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 /**
  * The MongoDatabase interface.
@@ -74,6 +78,37 @@ public interface MongoDatabase {
      */
     ReadConcern getReadConcern();
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available.</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
     /**
      * Create a new MongoDatabase instance with a different codec registry.
      *
@@ -114,6 +149,23 @@ public interface MongoDatabase {
      */
     MongoDatabase withReadConcern(ReadConcern readConcern);
 
+    /**
+     * Create a new MongoDatabase instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new MongoDatabase instance with the set time limit for the full execution of an operation.
+     * @since 5.2
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    MongoDatabase withTimeout(long timeout, TimeUnit timeUnit);
+
     /**
      * Gets a collection.
      *
@@ -135,6 +187,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command the command to be run
      * @return a publisher containing the command result
      */
@@ -143,6 +198,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command        the command to be run
      * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command
      * @return a publisher containing the command result
@@ -152,6 +210,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command   the command to be run
      * @param clazz     the default class to cast any documents returned from the database into.
      * @param <TResult> the type of the class to use instead of {@code Document}.
@@ -162,6 +223,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command        the command to be run
      * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command
      * @param clazz          the default class to cast any documents returned from the database into.
@@ -173,6 +237,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command the command to be run
      * @return a publisher containing the command result
@@ -184,6 +251,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command        the command to be run
      * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command
@@ -196,6 +266,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command   the command to be run
      * @param clazz     the default class to cast any documents returned from the database into.
@@ -209,6 +282,9 @@ public interface MongoDatabase {
     /**
      * Executes command in the context of the current database.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command        the command to be run
      * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java
index e0df38798d4..78a3f5357fc 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java
@@ -19,9 +19,12 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
 import com.mongodb.client.gridfs.model.GridFSUploadOptions;
+import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
 import org.bson.BsonValue;
 import org.bson.conversions.Bson;
@@ -29,6 +32,7 @@
 import org.reactivestreams.Publisher;
 
 import java.nio.ByteBuffer;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Represents a GridFS Bucket
@@ -75,6 +79,37 @@ public interface GridFSBucket {
      */
     ReadConcern getReadConcern();
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available.</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
     /**
      * Create a new GridFSBucket instance with a new chunk size in bytes.
      *
@@ -109,6 +144,23 @@ public interface GridFSBucket {
      */
     GridFSBucket withReadConcern(ReadConcern readConcern);
 
+    /**
+     * Create a new GridFSBucket instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new GridFSBucket instance with the set time limit for the full execution of an operation
+     * @since 5.2
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    GridFSBucket withTimeout(long timeout, TimeUnit timeUnit);
+
     /**
      * Uploads the contents of the given {@code Publisher} to a GridFS bucket.
      * <p>
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java
index f9160b030f0..d96c0e933da 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java
@@ -18,11 +18,14 @@
 
 import com.mongodb.ExplainVerbosity;
 import com.mongodb.MongoNamespace;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.client.model.AggregationLevel;
 import com.mongodb.internal.client.model.FindOptions;
 import com.mongodb.internal.operation.AsyncExplainableReadOperation;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.AggregatePublisher;
@@ -36,6 +39,7 @@
 
 import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 
@@ -74,6 +78,12 @@ public AggregatePublisher<T> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public AggregatePublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public AggregatePublisher<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
         notNull("timeUnit", timeUnit);
@@ -83,8 +93,7 @@ public AggregatePublisher<T> maxTime(final long maxTime, final TimeUnit timeUnit
 
     @Override
     public AggregatePublisher<T> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit);
+        this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit);
         return this;
     }
 
@@ -136,7 +145,9 @@ public Publisher<Void> toCollection() {
         if (lastPipelineStage == null || !lastPipelineStage.containsKey("$out") && !lastPipelineStage.containsKey("$merge")) {
             throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge");
         }
-        return getMongoOperationPublisher().createReadOperationMono(this::getAggregateToCollectionOperation, getClientSession());
+        return getMongoOperationPublisher().createReadOperationMono(
+                (asyncOperations) -> asyncOperations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS),
+                this::getAggregateToCollectionOperation, getClientSession());
     }
 
     @Override
@@ -161,10 +172,10 @@ public <E> Publisher<E> explain(final Class<E> explainResultClass, final Explain
 
     private <E> Publisher<E> publishExplain(final Class<E> explainResultClass, @Nullable final ExplainVerbosity verbosity) {
         notNull("explainDocumentClass", explainResultClass);
-        return getMongoOperationPublisher().createReadOperationMono(() ->
-                        asAggregateOperation(1).asAsyncExplainableOperation(verbosity,
-                                getCodecRegistry().get(explainResultClass)),
-                getClientSession());
+        return getMongoOperationPublisher().createReadOperationMono(
+                AsyncOperations::getTimeoutSettings,
+                () -> asAggregateOperation(1).asAsyncExplainableOperation(verbosity,
+                        getCodecRegistry().get(explainResultClass)), getClientSession());
     }
 
     @Override
@@ -185,15 +196,20 @@ AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBa
         }
     }
 
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS));
+    }
+
     private AsyncExplainableReadOperation<AsyncBatchCursor<T>> asAggregateOperation(final int initialBatchSize) {
         return getOperations()
-                .aggregate(pipeline, getDocumentClass(), maxTimeMS, maxAwaitTimeMS,
+                .aggregate(pipeline, getDocumentClass(), getTimeoutMode(),
                            initialBatchSize, collation, hint, hintString, comment, variables, allowDiskUse, aggregationLevel);
     }
 
     private AsyncReadOperation<Void> getAggregateToCollectionOperation() {
-        return getOperations().aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, comment,
-                                                     variables, aggregationLevel);
+        return getOperations().aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, bypassDocumentValidation,
+                collation, hint, hintString, comment, variables, aggregationLevel);
     }
 
     @Nullable
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java
index 3a19f14709f..cf5a9d9f25b 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java
@@ -18,6 +18,8 @@
 
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadPreference;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.VisibleForTesting;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.operation.AsyncOperations;
@@ -29,9 +31,12 @@
 import org.reactivestreams.Subscriber;
 import reactor.core.publisher.Mono;
 
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 import java.util.function.Supplier;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
+import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
 
 @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PROTECTED)
@@ -39,6 +44,7 @@ public abstract class BatchCursorPublisher<T> implements Publisher<T> {
     private final ClientSession clientSession;
     private final MongoOperationPublisher<T> mongoOperationPublisher;
     private Integer batchSize;
+    private TimeoutMode timeoutMode;
 
     BatchCursorPublisher(@Nullable final ClientSession clientSession, final MongoOperationPublisher<T> mongoOperationPublisher) {
         this(clientSession, mongoOperationPublisher, null);
@@ -52,6 +58,7 @@ public abstract class BatchCursorPublisher<T> implements Publisher<T> {
     }
 
     abstract AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(int initialBatchSize);
+    abstract Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings();
 
     AsyncReadOperation<AsyncBatchCursor<T>> asAsyncFirstReadOperation() {
         return asAsyncReadOperation(1);
@@ -101,6 +108,19 @@ public Publisher<T> batchSize(final int batchSize) {
         return this;
     }
 
+    public Publisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        if (mongoOperationPublisher.getTimeoutSettings().getTimeoutMS() == null) {
+            throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set.");
+        }
+        this.timeoutMode = timeoutMode;
+        return this;
+    }
+
+    @Nullable
+    public TimeoutMode getTimeoutMode() {
+        return timeoutMode;
+    }
+
     public Publisher<T> first() {
         return batchCursor(this::asAsyncFirstReadOperation)
                 .flatMap(batchCursor -> Mono.create(sink -> {
@@ -130,7 +150,18 @@ public Mono<BatchCursor<T>> batchCursor(final int initialBatchSize) {
     }
 
     Mono<BatchCursor<T>> batchCursor(final Supplier<AsyncReadOperation<AsyncBatchCursor<T>>> supplier) {
-        return mongoOperationPublisher.createReadOperationMono(supplier, clientSession).map(BatchCursor::new);
+        return mongoOperationPublisher.createReadOperationMono(getTimeoutSettings(), supplier, clientSession).map(BatchCursor::new);
     }
 
+
+    protected long validateMaxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
+        notNull("timeUnit", timeUnit);
+        Long timeoutMS = mongoOperationPublisher.getTimeoutSettings().getTimeoutMS();
+        long maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit);
+
+        isTrueArgument("maxAwaitTimeMS must be less than timeoutMS", timeoutMS == null || timeoutMS == 0
+                || timeoutMS > maxAwaitTimeMS);
+
+        return maxAwaitTimeMS;
+    }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java
index 06c1857287a..8fc1a093aab 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java
@@ -20,8 +20,10 @@
 import com.mongodb.client.model.changestream.ChangeStreamDocument;
 import com.mongodb.client.model.changestream.FullDocument;
 import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ChangeStreamPublisher;
@@ -36,9 +38,9 @@
 
 import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 
 final class ChangeStreamPublisherImpl<T> extends BatchCursorPublisher<ChangeStreamDocument<T>>
@@ -121,8 +123,7 @@ public ChangeStreamPublisher<T> comment(@Nullable final BsonValue comment) {
 
     @Override
     public ChangeStreamPublisher<T> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxAwaitTimeMS = MILLISECONDS.convert(maxAwaitTime, timeUnit);
+        this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit);
         return this;
     }
 
@@ -140,6 +141,11 @@ public <TDocument> Publisher<TDocument> withDocumentClass(final Class<TDocument>
             AsyncReadOperation<AsyncBatchCursor<TDocument>> asAsyncReadOperation(final int initialBatchSize) {
                 return createChangeStreamOperation(getMongoOperationPublisher().getCodecRegistry().get(clazz), initialBatchSize);
             }
+
+            @Override
+            Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+                return (asyncOperations -> asyncOperations.createTimeoutSettings(0, maxAwaitTimeMS));
+            }
         };
     }
 
@@ -166,8 +172,14 @@ AsyncReadOperation<AsyncBatchCursor<ChangeStreamDocument<T>>> asAsyncReadOperati
         return createChangeStreamOperation(codec, initialBatchSize);
     }
 
+
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(0, maxAwaitTimeMS));
+    }
+
     private <S> AsyncReadOperation<AsyncBatchCursor<S>> createChangeStreamOperation(final Codec<S> codec, final int initialBatchSize) {
         return getOperations().changeStream(fullDocument, fullDocumentBeforeChange, pipeline, codec, changeStreamLevel, initialBatchSize,
-                collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
+                collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java
index 46fa37bf8d2..2e87b3bccf8 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java
@@ -18,8 +18,6 @@
 
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ClusterType;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.async.SingleResultCallback;
@@ -32,7 +30,6 @@
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.session.ClientSessionContext;
-import com.mongodb.internal.session.SessionContext;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
 import org.bson.BsonTimestamp;
@@ -49,13 +46,13 @@ public class ClientSessionBinding extends AbstractReferenceCounted implements As
     private final AsyncClusterAwareReadWriteBinding wrapped;
     private final ClientSession session;
     private final boolean ownsSession;
-    private final ClientSessionContext sessionContext;
+    private final OperationContext operationContext;
 
     public ClientSessionBinding(final ClientSession session, final boolean ownsSession, final AsyncClusterAwareReadWriteBinding wrapped) {
         this.wrapped = notNull("wrapped", wrapped).retain();
         this.ownsSession = ownsSession;
         this.session = notNull("session", session);
-        this.sessionContext = new AsyncClientSessionContext(session);
+        this.operationContext = wrapped.getOperationContext().withSessionContext(new AsyncClientSessionContext(session));
     }
 
     @Override
@@ -63,25 +60,9 @@ public ReadPreference getReadPreference() {
         return wrapped.getReadPreference();
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return sessionContext;
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return wrapped.getServerApi();
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return wrapped.getRequestContext();
-    }
-
     @Override
     public OperationContext getOperationContext() {
-        return wrapped.getOperationContext();
+        return operationContext;
     }
 
     @Override
@@ -159,25 +140,9 @@ public ServerDescription getServerDescription() {
             return wrapped.getServerDescription();
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return sessionContext;
-        }
-
-        @Override
-        @Nullable
-        public ServerApi getServerApi() {
-            return wrapped.getServerApi();
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return wrapped.getRequestContext();
-        }
-
         @Override
         public OperationContext getOperationContext() {
-            return wrapped.getOperationContext();
+            return operationContext;
         }
 
         @Override
@@ -277,7 +242,7 @@ public ReadConcern getReadConcern() {
             } else if (isSnapshot()) {
                 return ReadConcern.SNAPSHOT;
             } else {
-                return wrapped.getSessionContext().getReadConcern();
+                return wrapped.getOperationContext().getSessionContext().getReadConcern();
             }
         }
     }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java
index 9594a9ad533..62314c7e141 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java
@@ -23,14 +23,16 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.TransactionOptions;
 import com.mongodb.WriteConcern;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.operation.AbortTransactionOperation;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.internal.operation.AsyncWriteOperation;
 import com.mongodb.internal.operation.CommitTransactionOperation;
+import com.mongodb.internal.operation.WriteConcernHelper;
 import com.mongodb.internal.session.BaseClientSessionImpl;
 import com.mongodb.internal.session.ServerSessionPool;
+import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
-import com.mongodb.reactivestreams.client.MongoClient;
 import org.reactivestreams.Publisher;
 import reactor.core.publisher.Mono;
 import reactor.core.publisher.MonoSink;
@@ -41,20 +43,22 @@
 import static com.mongodb.assertions.Assertions.assertTrue;
 import static com.mongodb.assertions.Assertions.isTrue;
 import static com.mongodb.assertions.Assertions.notNull;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 final class ClientSessionPublisherImpl extends BaseClientSessionImpl implements ClientSession {
 
+    private final MongoClientImpl mongoClient;
     private final OperationExecutor executor;
     private TransactionState transactionState = TransactionState.NONE;
     private boolean messageSentInCurrentTransaction;
     private boolean commitInProgress;
     private TransactionOptions transactionOptions;
 
-    ClientSessionPublisherImpl(final ServerSessionPool serverSessionPool, final MongoClient mongoClient,
+
+    ClientSessionPublisherImpl(final ServerSessionPool serverSessionPool, final MongoClientImpl mongoClient,
             final ClientSessionOptions options, final OperationExecutor executor) {
         super(serverSessionPool, mongoClient, options);
         this.executor = executor;
+        this.mongoClient = mongoClient;
     }
 
     @Override
@@ -100,6 +104,7 @@ public void startTransaction() {
     @Override
     public void startTransaction(final TransactionOptions transactionOptions) {
         notNull("transactionOptions", transactionOptions);
+
         Boolean snapshot = getOptions().isSnapshot();
         if (snapshot != null && snapshot) {
             throw new IllegalArgumentException("Transactions are not supported in snapshot sessions");
@@ -114,7 +119,9 @@ public void startTransaction(final TransactionOptions transactionOptions) {
         }
         getServerSession().advanceTransactionNumber();
         this.transactionOptions = TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions());
-        WriteConcern writeConcern = this.transactionOptions.getWriteConcern();
+
+        TimeoutContext timeoutContext = createTimeoutContext();
+        WriteConcern writeConcern = getWriteConcern(timeoutContext);
         if (writeConcern == null) {
             throw new MongoInternalException("Invariant violated. Transaction options write concern can not be null");
         }
@@ -122,6 +129,16 @@ public void startTransaction(final TransactionOptions transactionOptions) {
             throw new MongoClientException("Transactions do not support unacknowledged write concern");
         }
         clearTransactionContext();
+        setTimeoutContext(timeoutContext);
+    }
+
+    @Nullable
+    private WriteConcern getWriteConcern(@Nullable final TimeoutContext timeoutContext) {
+        WriteConcern writeConcern = transactionOptions.getWriteConcern();
+        if (hasTimeoutMS(timeoutContext) && hasWTimeoutMS(writeConcern)) {
+            return WriteConcernHelper.cloneWithoutTimeout(writeConcern);
+        }
+        return writeConcern;
     }
 
     @Override
@@ -142,12 +159,13 @@ public Publisher<Void> commitTransaction() {
             }
             boolean alreadyCommitted = commitInProgress || transactionState == TransactionState.COMMITTED;
             commitInProgress = true;
-
-            return executor.execute(
-                    new CommitTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()), alreadyCommitted)
-                            .recoveryToken(getRecoveryToken())
-                            .maxCommitTime(transactionOptions.getMaxCommitTime(MILLISECONDS), MILLISECONDS),
-                    readConcern, this)
+            resetTimeout();
+            TimeoutContext timeoutContext = getTimeoutContext();
+            WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext));
+            return executor
+                    .execute(
+                            new CommitTransactionOperation(writeConcern, alreadyCommitted)
+                                    .recoveryToken(getRecoveryToken()), readConcern, this)
                     .doOnTerminate(() -> {
                         commitInProgress = false;
                         transactionState = TransactionState.COMMITTED;
@@ -175,10 +193,13 @@ public Publisher<Void> abortTransaction() {
             if (readConcern == null) {
                 throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null");
             }
-            return executor.execute(
-                    new AbortTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()))
-                            .recoveryToken(getRecoveryToken()),
-                    readConcern, this)
+
+            resetTimeout();
+            TimeoutContext timeoutContext = getTimeoutContext();
+            WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext));
+            return executor
+                    .execute(new AbortTransactionOperation(writeConcern)
+                                    .recoveryToken(getRecoveryToken()), readConcern, this)
                     .onErrorResume(Throwable.class, (e) -> Mono.empty())
                     .doOnTerminate(() -> {
                         clearTransactionContext();
@@ -196,7 +217,7 @@ private void clearTransactionContextOnError(final MongoException e) {
     @Override
     public void close() {
         if (transactionState == TransactionState.IN) {
-            Mono.from(abortTransaction()).doOnSuccess(it -> close()).subscribe();
+            Mono.from(abortTransaction()).doFinally(it -> super.close()).subscribe();
         } else {
             super.close();
         }
@@ -206,9 +227,10 @@ private void cleanupTransaction(final TransactionState nextState) {
         messageSentInCurrentTransaction = false;
         transactionOptions = null;
         transactionState = nextState;
+        setTimeoutContext(null);
     }
 
-    private enum TransactionState {
-        NONE, IN, COMMITTED, ABORTED
+    private TimeoutContext createTimeoutContext() {
+        return new TimeoutContext(getTimeoutSettings(transactionOptions, executor.getTimeoutSettings()));
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java
index 16de864336f..84c0df234c5 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java
@@ -16,8 +16,11 @@
 
 package com.mongodb.reactivestreams.client.internal;
 
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
@@ -27,6 +30,7 @@
 import org.bson.conversions.Bson;
 
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 
@@ -84,9 +88,20 @@ public DistinctPublisher<T> comment(@Nullable final BsonValue comment) {
         return this;
     }
 
+    @Override
+    public DistinctPublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBatchSize) {
         // initialBatchSize is ignored for distinct operations.
-        return getOperations().distinct(fieldName, filter, getDocumentClass(), maxTimeMS, collation, comment);
+        return getOperations().distinct(fieldName, filter, getDocumentClass(), collation, comment);
+    }
+
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS));
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java
index 401c02dc583..ff9fb3a8036 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java
@@ -18,10 +18,13 @@
 
 import com.mongodb.CursorType;
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.client.model.FindOptions;
 import com.mongodb.internal.operation.AsyncExplainableReadOperation;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
@@ -32,6 +35,7 @@
 import org.reactivestreams.Publisher;
 
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 
@@ -74,7 +78,7 @@ public FindPublisher<T> maxTime(final long maxTime, final TimeUnit timeUnit) {
 
     @Override
     public FindPublisher<T> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
+        validateMaxAwaitTime(maxAwaitTime, timeUnit);
         findOptions.maxAwaitTime(maxAwaitTime, timeUnit);
         return this;
     }
@@ -182,6 +186,13 @@ public FindPublisher<T> allowDiskUse(@Nullable final Boolean allowDiskUse) {
         return this;
     }
 
+    @Override
+    public FindPublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        findOptions.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public Publisher<Document> explain() {
         return publishExplain(Document.class, null);
@@ -204,10 +215,10 @@ public <E> Publisher<E> explain(final Class<E> explainResultClass, final Explain
 
     private <E> Publisher<E> publishExplain(final Class<E> explainResultClass, @Nullable final ExplainVerbosity verbosity) {
         notNull("explainDocumentClass", explainResultClass);
-        return getMongoOperationPublisher().createReadOperationMono(() ->
-                        asAsyncReadOperation(0).asAsyncExplainableOperation(verbosity,
-                                getCodecRegistry().get(explainResultClass)),
-                getClientSession());
+        return getMongoOperationPublisher().createReadOperationMono(
+                getTimeoutSettings(),
+                () -> asAsyncReadOperation(0)
+                        .asAsyncExplainableOperation(verbosity, getCodecRegistry().get(explainResultClass)), getClientSession());
     }
 
     @Override
@@ -215,6 +226,11 @@ AsyncExplainableReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final in
         return getOperations().find(filter, getDocumentClass(), findOptions.withBatchSize(initialBatchSize));
     }
 
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(findOptions));
+    }
+
     @Override
     AsyncReadOperation<AsyncBatchCursor<T>> asAsyncFirstReadOperation() {
         return getOperations().findFirst(filter, getDocumentClass(), findOptions);
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java
index 056aaa615d4..057a8067ad3 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java
@@ -17,7 +17,10 @@
 package com.mongodb.reactivestreams.client.internal;
 
 import com.mongodb.ReadConcern;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
@@ -28,6 +31,7 @@
 import org.bson.conversions.Bson;
 
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -76,6 +80,14 @@ public ListCollectionsPublisher<T> comment(@Nullable final BsonValue comment) {
         return this;
     }
 
+
+    @SuppressWarnings("ReactiveStreamsUnusedPublisher")
+    @Override
+    public ListCollectionsPublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     /**
      * @see ListCollectionNamesPublisher#authorizedCollections(boolean)
      */
@@ -83,8 +95,14 @@ void authorizedCollections(final boolean authorizedCollections) {
         this.authorizedCollections = authorizedCollections;
     }
 
+
     AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBatchSize) {
         return getOperations().listCollections(getNamespace().getDatabaseName(), getDocumentClass(), filter, collectionNamesOnly,
-                authorizedCollections, initialBatchSize, maxTimeMS, comment);
+                authorizedCollections, initialBatchSize, comment, getTimeoutMode());
+    }
+
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS));
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java
index 0157401cf66..b897a8bf9df 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java
@@ -16,7 +16,10 @@
 
 package com.mongodb.reactivestreams.client.internal;
 
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
@@ -26,6 +29,7 @@
 import org.bson.conversions.Bson;
 
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -82,8 +86,19 @@ public ListDatabasesPublisher<T> comment(@Nullable final BsonValue comment) {
         return this;
     }
 
+    @Override
+    public ListDatabasesPublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS));
+    }
+
     AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBatchSize) {
-// initialBatchSize is ignored for distinct operations.
-        return getOperations().listDatabases(getDocumentClass(), filter, nameOnly, maxTimeMS, authorizedDatabasesOnly, comment);
+        // initialBatchSize is ignored for the listDatabases operation.
+        return getOperations().listDatabases(getDocumentClass(), filter, nameOnly, authorizedDatabasesOnly, comment);
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java
index 22a1f536dc0..79e5ce2a14a 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java
@@ -16,7 +16,10 @@
 
 package com.mongodb.reactivestreams.client.internal;
 
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
@@ -25,6 +28,7 @@
 import org.bson.BsonValue;
 
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -62,7 +66,19 @@ public ListIndexesPublisher<T> comment(@Nullable final BsonValue comment) {
         return this;
     }
 
+    @SuppressWarnings("ReactiveStreamsUnusedPublisher")
+    @Override
+    public ListIndexesPublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBatchSize) {
-        return getOperations().listIndexes(getDocumentClass(), initialBatchSize, maxTimeMS, comment);
+        return getOperations().listIndexes(getDocumentClass(), initialBatchSize, comment, getTimeoutMode());
+    }
+
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS));
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java
index 474ed7a6b09..035d7d3bbec 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java
@@ -17,9 +17,12 @@
 package com.mongodb.reactivestreams.client.internal;
 
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.operation.AsyncExplainableReadOperation;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher;
@@ -29,6 +32,7 @@
 import org.reactivestreams.Publisher;
 
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 
@@ -85,6 +89,12 @@ public ListSearchIndexesPublisher<T> comment(@Nullable final String comment) {
         return this;
     }
 
+    @Override
+    public ListSearchIndexesPublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public ListSearchIndexesPublisher<T> comment(@Nullable final BsonValue comment) {
         this.comment = comment;
@@ -117,8 +127,9 @@ public <E> Publisher<E> explain(final Class<E> explainResultClass, final Explain
     }
 
     private <E> Publisher<E> publishExplain(final Class<E> explainResultClass, @Nullable final ExplainVerbosity verbosity) {
-        return getMongoOperationPublisher().createReadOperationMono(() ->
-                asAggregateOperation(1).asAsyncExplainableOperation(verbosity,
+        return getMongoOperationPublisher().createReadOperationMono(
+                (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)),
+                () -> asAggregateOperation(1).asAsyncExplainableOperation(verbosity,
                         getCodecRegistry().get(explainResultClass)), getClientSession());
     }
 
@@ -127,9 +138,12 @@ AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBa
         return asAggregateOperation(initialBatchSize);
     }
 
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS));
+    }
+
     private AsyncExplainableReadOperation<AsyncBatchCursor<T>> asAggregateOperation(final int initialBatchSize) {
-        return getOperations().listSearchIndexes(getDocumentClass(), maxTimeMS, indexName, initialBatchSize, collation,
-                comment,
-                allowDiskUse);
+        return getOperations().listSearchIndexes(getDocumentClass(), indexName, initialBatchSize, collation, comment, allowDiskUse);
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java
index 37e30e04e07..f8371c8afb6 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java
@@ -18,12 +18,15 @@
 
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadPreference;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncReadBinding;
 import com.mongodb.internal.binding.AsyncWriteBinding;
 import com.mongodb.internal.client.model.FindOptions;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.internal.operation.AsyncWriteOperation;
 import com.mongodb.internal.operation.MapReduceAsyncBatchCursor;
@@ -35,6 +38,7 @@
 import org.reactivestreams.Publisher;
 
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.ReadPreference.primary;
 import static com.mongodb.assertions.Assertions.notNull;
@@ -151,12 +155,21 @@ public com.mongodb.reactivestreams.client.MapReducePublisher<T> bypassDocumentVa
         return this;
     }
 
+    @Override
+    public com.mongodb.reactivestreams.client.MapReducePublisher<T> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public Publisher<Void> toCollection() {
         if (inline) {
             throw new IllegalStateException("The options must specify a non-inline result");
         }
-        return getMongoOperationPublisher().createWriteOperationMono(this::createMapReduceToCollectionOperation, getClientSession());
+        return getMongoOperationPublisher().createWriteOperationMono(
+                (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS)),
+                this::createMapReduceToCollectionOperation,
+                getClientSession());
     }
 
     @Override
@@ -174,6 +187,11 @@ ReadPreference getReadPreference() {
         }
     }
 
+    @Override
+    Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+        return (asyncOperations -> asyncOperations.createTimeoutSettings(maxTimeMS));
+    }
+
     @Override
     AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBatchSize) {
         if (inline) {
@@ -187,15 +205,13 @@ AsyncReadOperation<AsyncBatchCursor<T>> asAsyncReadOperation(final int initialBa
 
     private WrappedMapReduceReadOperation<T> createMapReduceInlineOperation() {
         return new WrappedMapReduceReadOperation<>(getOperations().mapReduce(mapFunction, reduceFunction, finalizeFunction,
-                getDocumentClass(), filter, limit, maxTimeMS, jsMode, scope,
-                sort, verbose, collation));
+                getDocumentClass(), filter, limit, jsMode, scope, sort, verbose, collation));
     }
 
     private WrappedMapReduceWriteOperation createMapReduceToCollectionOperation() {
-        return new WrappedMapReduceWriteOperation(getOperations().mapReduceToCollection(databaseName, collectionName, mapFunction,
-                                                                                        reduceFunction, finalizeFunction, filter, limit,
-                                                                                        maxTimeMS, jsMode, scope, sort, verbose, action,
-                bypassDocumentValidation, collation));
+        return new WrappedMapReduceWriteOperation(
+                getOperations().mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter,
+                        limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation));
     }
 
     private AsyncReadOperation<AsyncBatchCursor<T>> createFindOperation(final int initialBatchSize) {
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java
index 95526e86ea5..27a0c9195c3 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java
@@ -18,10 +18,14 @@
 
 import com.mongodb.AutoEncryptionSettings;
 import com.mongodb.ClientSessionOptions;
+import com.mongodb.ContextProvider;
 import com.mongodb.MongoClientSettings;
 import com.mongodb.MongoDriverInformation;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
 import com.mongodb.connection.ClusterDescription;
-import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
@@ -31,18 +35,19 @@
 import com.mongodb.reactivestreams.client.ClientSession;
 import com.mongodb.reactivestreams.client.ListDatabasesPublisher;
 import com.mongodb.reactivestreams.client.MongoClient;
+import com.mongodb.reactivestreams.client.MongoCluster;
 import com.mongodb.reactivestreams.client.MongoDatabase;
+import com.mongodb.reactivestreams.client.ReactiveContextProvider;
 import com.mongodb.reactivestreams.client.internal.crypt.Crypt;
 import com.mongodb.reactivestreams.client.internal.crypt.Crypts;
 import org.bson.BsonDocument;
 import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
 import org.reactivestreams.Publisher;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
 
-import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static com.mongodb.assertions.Assertions.notNull;
@@ -59,14 +64,10 @@
 public final class MongoClientImpl implements MongoClient {
 
     private static final Logger LOGGER = Loggers.getLogger("client");
-    private final Cluster cluster;
     private final MongoClientSettings settings;
-    private final OperationExecutor executor;
     private final AutoCloseable externalResourceCloser;
-    private final ServerSessionPool serverSessionPool;
-    private final ClientSessionHelper clientSessionHelper;
-    private final MongoOperationPublisher<Document> mongoOperationPublisher;
-    private final Crypt crypt;
+
+    private final MongoClusterImpl delegate;
     private final AtomicBoolean closed;
 
     public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation, final Cluster cluster,
@@ -81,66 +82,72 @@ public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInfo
 
     private MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation, final Cluster cluster,
                             @Nullable final OperationExecutor executor, @Nullable final AutoCloseable externalResourceCloser) {
-        this.settings = notNull("settings", settings);
-        this.cluster = notNull("cluster", cluster);
-        this.serverSessionPool = new ServerSessionPool(cluster, settings.getServerApi());
-        this.clientSessionHelper = new ClientSessionHelper(this, serverSessionPool);
+        notNull("settings", settings);
+        notNull("cluster", cluster);
+
+        TimeoutSettings timeoutSettings = TimeoutSettings.create(settings);
+        ServerSessionPool serverSessionPool = new ServerSessionPool(cluster, timeoutSettings, settings.getServerApi());
+        ClientSessionHelper clientSessionHelper = new ClientSessionHelper(this, serverSessionPool);
+
         AutoEncryptionSettings autoEncryptSettings = settings.getAutoEncryptionSettings();
-        this.crypt = autoEncryptSettings != null ? Crypts.createCrypt(this, autoEncryptSettings) : null;
-        if (executor == null) {
-            this.executor = new OperationExecutorImpl(this, clientSessionHelper);
-        } else {
-            this.executor = executor;
+        Crypt crypt = autoEncryptSettings != null ? Crypts.createCrypt(settings, autoEncryptSettings) : null;
+        ContextProvider contextProvider = settings.getContextProvider();
+        if (contextProvider != null && !(contextProvider instanceof ReactiveContextProvider)) {
+            throw new IllegalArgumentException("The contextProvider must be an instance of "
+                    + ReactiveContextProvider.class.getName() + " when using the Reactive Streams driver");
         }
+        OperationExecutor operationExecutor = executor != null ? executor
+                : new OperationExecutorImpl(this, clientSessionHelper, timeoutSettings, (ReactiveContextProvider) contextProvider);
+        MongoOperationPublisher<Document> mongoOperationPublisher = new MongoOperationPublisher<>(Document.class,
+                withUuidRepresentation(settings.getCodecRegistry(),
+                        settings.getUuidRepresentation()),
+                settings.getReadPreference(),
+                settings.getReadConcern(), settings.getWriteConcern(),
+                settings.getRetryWrites(), settings.getRetryReads(),
+                settings.getUuidRepresentation(),
+                settings.getAutoEncryptionSettings(),
+                timeoutSettings,
+                operationExecutor);
+
+        this.delegate = new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper,
+                mongoOperationPublisher);
         this.externalResourceCloser = externalResourceCloser;
-        this.mongoOperationPublisher = new MongoOperationPublisher<>(Document.class,
-                                                                     withUuidRepresentation(settings.getCodecRegistry(),
-                                                                     settings.getUuidRepresentation()),
-                                                                     settings.getReadPreference(),
-                                                                     settings.getReadConcern(), settings.getWriteConcern(),
-                                                                     settings.getRetryWrites(), settings.getRetryReads(),
-                                                                     settings.getUuidRepresentation(),
-                                                                     settings.getAutoEncryptionSettings(),
-                                                                     this.executor);
+        this.settings = settings;
         this.closed = new AtomicBoolean();
         BsonDocument clientMetadataDocument = createClientMetadataDocument(settings.getApplicationName(), mongoDriverInformation);
         LOGGER.info(format("MongoClient with metadata %s created with settings %s", clientMetadataDocument.toJson(), settings));
     }
 
     Cluster getCluster() {
-        return cluster;
+        return delegate.getCluster();
     }
 
     public ServerSessionPool getServerSessionPool() {
-        return serverSessionPool;
+        return delegate.getServerSessionPool();
     }
 
     MongoOperationPublisher<Document> getMongoOperationPublisher() {
-        return mongoOperationPublisher;
+        return delegate.getMongoOperationPublisher();
     }
 
     @Nullable
     Crypt getCrypt() {
-        return crypt;
+        return delegate.getCrypt();
     }
 
     public MongoClientSettings getSettings() {
         return settings;
     }
 
-    @Override
-    public MongoDatabase getDatabase(final String name) {
-        return new MongoDatabaseImpl(mongoOperationPublisher.withDatabase(name));
-    }
-
     @Override
     public void close() {
         if (!closed.getAndSet(true)) {
+            Crypt crypt = getCrypt();
             if (crypt != null) {
                 crypt.close();
             }
-            serverSessionPool.close();
-            cluster.close();
+            getServerSessionPool().close();
+            getCluster().close();
             if (externalResourceCloser != null) {
                 try {
                     externalResourceCloser.close();
@@ -153,91 +160,142 @@ public void close() {
 
     @Override
     public Publisher<String> listDatabaseNames() {
-        return Flux.from(listDatabases().nameOnly(true)).map(d -> d.getString("name"));
+        return delegate.listDatabaseNames();
     }
 
     @Override
     public Publisher<String> listDatabaseNames(final ClientSession clientSession) {
-        return Flux.from(listDatabases(clientSession).nameOnly(true)).map(d -> d.getString("name"));
+        return delegate.listDatabaseNames(clientSession);
     }
 
     @Override
     public ListDatabasesPublisher<Document> listDatabases() {
-        return listDatabases(Document.class);
+        return delegate.listDatabases();
     }
 
     @Override
-    public <T> ListDatabasesPublisher<T> listDatabases(final Class<T> clazz) {
-        return new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(clazz));
+    public <TResult> ListDatabasesPublisher<TResult> listDatabases(final Class<TResult> clazz) {
+        return delegate.listDatabases(clazz);
     }
 
     @Override
     public ListDatabasesPublisher<Document> listDatabases(final ClientSession clientSession) {
-        return listDatabases(clientSession, Document.class);
+        return delegate.listDatabases(clientSession);
     }
 
     @Override
-    public <T> ListDatabasesPublisher<T> listDatabases(final ClientSession clientSession, final Class<T> clazz) {
-        return new ListDatabasesPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDocumentClass(clazz));
+    public <TResult> ListDatabasesPublisher<TResult> listDatabases(final ClientSession clientSession, final Class<TResult> clazz) {
+        return delegate.listDatabases(clientSession, clazz);
     }
 
     @Override
     public ChangeStreamPublisher<Document> watch() {
-        return watch(Collections.emptyList());
+        return delegate.watch();
     }
 
     @Override
-    public <T> ChangeStreamPublisher<T> watch(final Class<T> resultClass) {
-        return watch(Collections.emptyList(), resultClass);
+    public <TResult> ChangeStreamPublisher<TResult> watch(final Class<TResult> resultClass) {
+        return delegate.watch(resultClass);
     }
 
     @Override
     public ChangeStreamPublisher<Document> watch(final List<? extends Bson> pipeline) {
-        return watch(pipeline, Document.class);
+        return delegate.watch(pipeline);
     }
 
     @Override
-    public <T> ChangeStreamPublisher<T> watch(final List<? extends Bson> pipeline, final Class<T> resultClass) {
-        return new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"),
-                                               resultClass, pipeline, ChangeStreamLevel.CLIENT);
+    public <TResult> ChangeStreamPublisher<TResult> watch(final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return delegate.watch(pipeline, resultClass);
     }
 
     @Override
     public ChangeStreamPublisher<Document> watch(final ClientSession clientSession) {
-        return watch(clientSession, Collections.emptyList(), Document.class);
+        return delegate.watch(clientSession);
     }
 
     @Override
-    public <T> ChangeStreamPublisher<T> watch(final ClientSession clientSession, final Class<T> resultClass) {
-        return watch(clientSession, Collections.emptyList(), resultClass);
+    public <TResult> ChangeStreamPublisher<TResult> watch(final ClientSession clientSession, final Class<TResult> resultClass) {
+        return delegate.watch(clientSession, resultClass);
     }
 
     @Override
     public ChangeStreamPublisher<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
-        return watch(clientSession, pipeline, Document.class);
+        return delegate.watch(clientSession, pipeline);
     }
 
     @Override
-    public <T> ChangeStreamPublisher<T> watch(final ClientSession clientSession, final List<? extends Bson> pipeline,
-                                              final Class<T> resultClass) {
-        return new ChangeStreamPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDatabase("admin"),
-                                               resultClass, pipeline, ChangeStreamLevel.CLIENT);
+    public <TResult> ChangeStreamPublisher<TResult> watch(
+            final ClientSession clientSession, final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return delegate.watch(clientSession, pipeline, resultClass);
     }
 
     @Override
     public Publisher<ClientSession> startSession() {
-        return startSession(ClientSessionOptions.builder().build());
+        return delegate.startSession();
     }
 
     @Override
     public Publisher<ClientSession> startSession(final ClientSessionOptions options) {
-        notNull("options", options);
-        return Mono.fromCallable(() -> clientSessionHelper.createClientSession(options, executor));
+        return delegate.startSession(options);
+    }
+
+    @Override
+    public CodecRegistry getCodecRegistry() {
+        return delegate.getCodecRegistry();
+    }
+
+    @Override
+    public ReadPreference getReadPreference() {
+        return delegate.getReadPreference();
+    }
+
+    @Override
+    public WriteConcern getWriteConcern() {
+        return delegate.getWriteConcern();
+    }
+
+    @Override
+    public ReadConcern getReadConcern() {
+        return delegate.getReadConcern();
+    }
+
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return delegate.getTimeout(timeUnit); // delegate, consistent with every other MongoCluster method in this class
+    }
+
+    @Override
+    public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) {
+        return delegate.withCodecRegistry(codecRegistry);
+    }
+
+    @Override
+    public MongoCluster withReadPreference(final ReadPreference readPreference) {
+        return delegate.withReadPreference(readPreference);
+    }
+
+    @Override
+    public MongoCluster withWriteConcern(final WriteConcern writeConcern) {
+        return delegate.withWriteConcern(writeConcern);
+    }
+
+    @Override
+    public MongoCluster withReadConcern(final ReadConcern readConcern) {
+        return delegate.withReadConcern(readConcern);
+    }
+
+    @Override
+    public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return delegate.withTimeout(timeout, timeUnit);
+    }
+
+    @Override
+    public MongoDatabase getDatabase(final String name) {
+        return delegate.getDatabase(name);
     }
 
     @Override
     public ClusterDescription getClusterDescription() {
         return getCluster().getCurrentDescription();
     }
-
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java
new file mode 100644
index 00000000000..72bcf53e303
--- /dev/null
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.internal.TimeoutSettings;
+import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.internal.connection.Cluster;
+import com.mongodb.internal.session.ServerSessionPool;
+import com.mongodb.lang.Nullable;
+import com.mongodb.reactivestreams.client.ChangeStreamPublisher;
+import com.mongodb.reactivestreams.client.ClientSession;
+import com.mongodb.reactivestreams.client.ListDatabasesPublisher;
+import com.mongodb.reactivestreams.client.MongoCluster;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import com.mongodb.reactivestreams.client.internal.crypt.Crypt;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.assertions.Assertions.notNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+final class MongoClusterImpl implements MongoCluster {
+
+    private final Cluster cluster;
+    private final Crypt crypt;
+    private final OperationExecutor operationExecutor;
+    private final ServerSessionPool serverSessionPool;
+    private final ClientSessionHelper clientSessionHelper;
+    private final MongoOperationPublisher<Document> mongoOperationPublisher;
+
+    MongoClusterImpl(final Cluster cluster, @Nullable final Crypt crypt, final OperationExecutor operationExecutor,
+            final ServerSessionPool serverSessionPool, final ClientSessionHelper clientSessionHelper,
+            final MongoOperationPublisher<Document> mongoOperationPublisher) {
+
+        this.cluster = cluster;
+        this.crypt = crypt;
+        this.operationExecutor = operationExecutor;
+        this.serverSessionPool = serverSessionPool;
+        this.clientSessionHelper = clientSessionHelper;
+        this.mongoOperationPublisher = mongoOperationPublisher;
+    }
+
+    @Override
+    public CodecRegistry getCodecRegistry() {
+        return mongoOperationPublisher.getCodecRegistry();
+    }
+
+    @Override
+    public ReadPreference getReadPreference() {
+        return mongoOperationPublisher.getReadPreference();
+    }
+
+    @Override
+    public WriteConcern getWriteConcern() {
+        return mongoOperationPublisher.getWriteConcern();
+    }
+
+    @Override
+    public ReadConcern getReadConcern() {
+        return mongoOperationPublisher.getReadConcern();
+    }
+
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        Long timeoutMS = mongoOperationPublisher.getTimeoutMS();
+        return timeoutMS != null ? timeUnit.convert(timeoutMS, MILLISECONDS) : null; // timeoutMS is in ms: convert FROM ms TO caller's unit
+    }
+
+    @Override
+    public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) {
+        return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper,
+                mongoOperationPublisher.withCodecRegistry(codecRegistry));
+    }
+
+    @Override
+    public MongoCluster withReadPreference(final ReadPreference readPreference) {
+        return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper,
+                mongoOperationPublisher.withReadPreference(readPreference));
+    }
+
+    @Override
+    public MongoCluster withWriteConcern(final WriteConcern writeConcern) {
+        return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper,
+                mongoOperationPublisher.withWriteConcern(writeConcern));
+    }
+
+    @Override
+    public MongoCluster withReadConcern(final ReadConcern readConcern) {
+        return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper,
+                mongoOperationPublisher.withReadConcern(readConcern));
+    }
+
+    @Override
+    public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper,
+                mongoOperationPublisher.withTimeout(timeout, timeUnit));
+    }
+
+    public Cluster getCluster() {
+        return cluster;
+    }
+
+    @Nullable
+    public Crypt getCrypt() {
+        return crypt;
+    }
+
+    public ClientSessionHelper getClientSessionHelper() {
+        return clientSessionHelper;
+    }
+
+    public ServerSessionPool getServerSessionPool() {
+        return serverSessionPool;
+    }
+
+    public MongoOperationPublisher<Document> getMongoOperationPublisher() {
+        return mongoOperationPublisher;
+    }
+
+    public TimeoutSettings getTimeoutSettings() {
+        return mongoOperationPublisher.getTimeoutSettings();
+    }
+
+    @Override
+    public Publisher<ClientSession> startSession() {
+        return startSession(ClientSessionOptions.builder().build());
+    }
+
+    @Override
+    public Publisher<ClientSession> startSession(final ClientSessionOptions options) {
+        notNull("options", options);
+        return Mono.fromCallable(() -> clientSessionHelper.createClientSession(options, operationExecutor));
+    }
+
+
+    @Override
+    public MongoDatabase getDatabase(final String name) {
+        return new MongoDatabaseImpl(mongoOperationPublisher.withDatabase(name));
+    }
+
+    @Override
+    public Publisher<String> listDatabaseNames() {
+        return Flux.from(listDatabases().nameOnly(true)).map(d -> d.getString("name"));
+    }
+
+    @Override
+    public Publisher<String> listDatabaseNames(final ClientSession clientSession) {
+        return Flux.from(listDatabases(clientSession).nameOnly(true)).map(d -> d.getString("name"));
+    }
+
+    @Override
+    public ListDatabasesPublisher<Document> listDatabases() {
+        return listDatabases(Document.class);
+    }
+
+    @Override
+    public <T> ListDatabasesPublisher<T> listDatabases(final Class<T> clazz) {
+        return new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(clazz));
+    }
+
+    @Override
+    public ListDatabasesPublisher<Document> listDatabases(final ClientSession clientSession) {
+        return listDatabases(clientSession, Document.class);
+    }
+
+    @Override
+    public <T> ListDatabasesPublisher<T> listDatabases(final ClientSession clientSession, final Class<T> clazz) {
+        return new ListDatabasesPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDocumentClass(clazz));
+    }
+
+    @Override
+    public ChangeStreamPublisher<Document> watch() {
+        return watch(Collections.emptyList());
+    }
+
+    @Override
+    public <T> ChangeStreamPublisher<T> watch(final Class<T> resultClass) {
+        return watch(Collections.emptyList(), resultClass);
+    }
+
+    @Override
+    public ChangeStreamPublisher<Document> watch(final List<? extends Bson> pipeline) {
+        return watch(pipeline, Document.class);
+    }
+
+    @Override
+    public <T> ChangeStreamPublisher<T> watch(final List<? extends Bson> pipeline, final Class<T> resultClass) {
+        return new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"),
+                resultClass, pipeline, ChangeStreamLevel.CLIENT);
+    }
+
+    @Override
+    public ChangeStreamPublisher<Document> watch(final ClientSession clientSession) {
+        return watch(clientSession, Collections.emptyList(), Document.class);
+    }
+
+    @Override
+    public <T> ChangeStreamPublisher<T> watch(final ClientSession clientSession, final Class<T> resultClass) {
+        return watch(clientSession, Collections.emptyList(), resultClass);
+    }
+
+    @Override
+    public ChangeStreamPublisher<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
+        return watch(clientSession, pipeline, Document.class);
+    }
+
+    @Override
+    public <T> ChangeStreamPublisher<T> watch(final ClientSession clientSession, final List<? extends Bson> pipeline,
+            final Class<T> resultClass) {
+        return new ChangeStreamPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDatabase("admin"),
+                resultClass, pipeline, ChangeStreamLevel.CLIENT);
+    }
+
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java
index d9fa18c6a54..0ac3d6a2e39 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java
@@ -62,6 +62,7 @@
 
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.assertions.Assertions.notNull;
@@ -105,6 +106,12 @@ public ReadConcern getReadConcern() {
         return mongoOperationPublisher.getReadConcern();
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        Long timeoutMS = mongoOperationPublisher.getTimeoutMS();
+        return (timeoutMS != null) ? notNull("timeUnit", timeUnit).convert(timeoutMS, TimeUnit.MILLISECONDS) : null;
+    }
+
     MongoOperationPublisher<T> getPublisherHelper() {
         return mongoOperationPublisher;
     }
@@ -134,6 +141,11 @@ public MongoCollection<T> withReadConcern(final ReadConcern readConcern) {
         return new MongoCollectionImpl<>(mongoOperationPublisher.withReadConcern(readConcern));
     }
 
+    @Override
+    public MongoCollection<T> withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new MongoCollectionImpl<>(mongoOperationPublisher.withTimeout(timeout, timeUnit));
+    }
+
     @Override
     public Publisher<Long> estimatedDocumentCount() {
         return estimatedDocumentCount(new EstimatedDocumentCountOptions());
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java
index 268b9df8081..f8709f12ad8 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java
@@ -38,10 +38,12 @@
 
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.MongoNamespace.checkDatabaseNameValidity;
 import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.assertions.Assertions.notNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 
 /**
@@ -82,6 +84,12 @@ public ReadConcern getReadConcern() {
         return mongoOperationPublisher.getReadConcern();
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        Long timeoutMS = mongoOperationPublisher.getTimeoutSettings().getTimeoutMS();
+        return timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS);
+    }
+
     MongoOperationPublisher<Document> getMongoOperationPublisher() {
         return mongoOperationPublisher;
     }
@@ -106,6 +114,11 @@ public MongoDatabase withReadConcern(final ReadConcern readConcern) {
         return new MongoDatabaseImpl(mongoOperationPublisher.withReadConcern(readConcern));
     }
 
+    @Override
+    public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new MongoDatabaseImpl(mongoOperationPublisher.withTimeout(timeout, timeUnit));
+    }
+
     @Override
     public MongoCollection<Document> getCollection(final String collectionName) {
         return getCollection(collectionName, Document.class);
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java
index b82bb5b7362..5ccea518cb5 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java
@@ -54,6 +54,7 @@
 import com.mongodb.client.result.InsertManyResult;
 import com.mongodb.client.result.InsertOneResult;
 import com.mongodb.client.result.UpdateResult;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.bulk.WriteRequest;
 import com.mongodb.internal.operation.AsyncOperations;
@@ -74,6 +75,8 @@
 
 import java.util.HashMap;
 import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.function.Supplier;
 
@@ -95,22 +98,22 @@ public final class MongoOperationPublisher<T> {
             final Class<T> documentClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
             final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads,
             final UuidRepresentation uuidRepresentation, @Nullable final AutoEncryptionSettings autoEncryptionSettings,
-            final OperationExecutor executor) {
+            final TimeoutSettings timeoutSettings, final OperationExecutor executor) {
         this(new MongoNamespace("_ignored", "_ignored"), documentClass,
              codecRegistry, readPreference, readConcern, writeConcern, retryWrites, retryReads,
-             uuidRepresentation, autoEncryptionSettings, executor);
+             uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     MongoOperationPublisher(
             final MongoNamespace namespace, final Class<T> documentClass, final CodecRegistry codecRegistry,
             final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern,
             final boolean retryWrites, final boolean retryReads, final UuidRepresentation uuidRepresentation,
-            @Nullable final AutoEncryptionSettings autoEncryptionSettings,
+            @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings,
             final OperationExecutor executor) {
         this.operations = new AsyncOperations<>(namespace, notNull("documentClass", documentClass),
                                            notNull("readPreference", readPreference), notNull("codecRegistry", codecRegistry),
                                            notNull("readConcern", readConcern), notNull("writeConcern", writeConcern),
-                                           retryWrites, retryReads);
+                                           retryWrites, retryReads, timeoutSettings);
         this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation);
         this.autoEncryptionSettings = autoEncryptionSettings;
         this.executor = notNull("executor", executor);
@@ -144,6 +147,15 @@ public boolean getRetryReads() {
         return operations.isRetryReads();
     }
 
+    @Nullable
+    public Long getTimeoutMS() {
+        return getTimeoutSettings().getTimeoutMS();
+    }
+
+    public TimeoutSettings getTimeoutSettings() {
+        return operations.getTimeoutSettings();
+    }
+
     Class<T> getDocumentClass() {
         return operations.getDocumentClass();
     }
@@ -175,15 +187,15 @@ <D> MongoOperationPublisher<D> withNamespaceAndDocumentClass(final MongoNamespac
             return (MongoOperationPublisher<D>) this;
         }
         return new MongoOperationPublisher<>(notNull("namespace", namespace), notNull("documentClass", documentClass),
-                                             getCodecRegistry(), getReadPreference(), getReadConcern(), getWriteConcern(),
-                                             getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, executor);
+                getCodecRegistry(), getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(),
+                uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor);
     }
 
     MongoOperationPublisher<T> withCodecRegistry(final CodecRegistry codecRegistry) {
         return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(),
-                                             withUuidRepresentation(notNull("codecRegistry", codecRegistry), uuidRepresentation),
-                                             getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(),
-                                             uuidRepresentation, autoEncryptionSettings, executor);
+                withUuidRepresentation(notNull("codecRegistry", codecRegistry), uuidRepresentation),
+                getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(),
+                uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor);
     }
 
     MongoOperationPublisher<T> withReadPreference(final ReadPreference readPreference) {
@@ -191,9 +203,8 @@ MongoOperationPublisher<T> withReadPreference(final ReadPreference readPreferenc
             return this;
         }
         return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(),
-                                             notNull("readPreference", readPreference),
-                                             getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(),
-                                             uuidRepresentation, autoEncryptionSettings, executor);
+                notNull("readPreference", readPreference), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(),
+                uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor);
     }
 
     MongoOperationPublisher<T> withWriteConcern(final WriteConcern writeConcern) {
@@ -201,8 +212,8 @@ MongoOperationPublisher<T> withWriteConcern(final WriteConcern writeConcern) {
             return this;
         }
         return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(), getReadPreference(), getReadConcern(),
-                                             notNull("writeConcern", writeConcern),
-                                             getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, executor);
+                notNull("writeConcern", writeConcern), getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings,
+                getTimeoutSettings(), executor);
     }
 
     MongoOperationPublisher<T> withReadConcern(final ReadConcern readConcern) {
@@ -210,24 +221,39 @@ MongoOperationPublisher<T> withReadConcern(final ReadConcern readConcern) {
             return this;
         }
         return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(),
-                                             getCodecRegistry(), getReadPreference(), notNull("readConcern", readConcern),
-                                             getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation,
-                                             autoEncryptionSettings, executor);
+                getCodecRegistry(), getReadPreference(), notNull("readConcern", readConcern),
+                getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation,
+                autoEncryptionSettings, getTimeoutSettings(), executor);
+    }
+
+    MongoOperationPublisher<T> withTimeout(final long timeout, final TimeUnit timeUnit) {
+        TimeoutSettings timeoutSettings = getTimeoutSettings().withTimeout(timeout, timeUnit);
+        if (Objects.equals(getTimeoutSettings(), timeoutSettings)) {
+            return this;
+        }
+        return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(),
+                getCodecRegistry(), getReadPreference(), getReadConcern(),
+                getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation,
+                autoEncryptionSettings, timeoutSettings, executor);
     }
 
     Publisher<Void> dropDatabase(@Nullable final ClientSession clientSession) {
-        return createWriteOperationMono(operations::dropDatabase, clientSession);
+        return createWriteOperationMono(operations::getTimeoutSettings, operations::dropDatabase, clientSession);
     }
 
     Publisher<Void> createCollection(
             @Nullable final ClientSession clientSession, final String collectionName, final CreateCollectionOptions options) {
-        return createWriteOperationMono(() -> operations.createCollection(collectionName, options, autoEncryptionSettings), clientSession);
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.createCollection(collectionName, options, autoEncryptionSettings), clientSession);
     }
 
     Publisher<Void> createView(
             @Nullable final ClientSession clientSession, final String viewName, final String viewOn,
             final List<? extends Bson> pipeline, final CreateViewOptions options) {
-        return createWriteOperationMono(() -> operations.createView(viewName, viewOn, pipeline, options), clientSession);
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.createView(viewName, viewOn, pipeline, options), clientSession);
     }
 
     public <R> Publisher<R> runCommand(
@@ -237,24 +263,30 @@ public <R> Publisher<R> runCommand(
             return Mono.error(new MongoClientException("Read preference in a transaction must be primary"));
         }
         return createReadOperationMono(
+                operations::getTimeoutSettings,
                 () -> operations.commandRead(command, clazz), clientSession, notNull("readPreference", readPreference));
     }
 
 
     Publisher<Long> estimatedDocumentCount(final EstimatedDocumentCountOptions options) {
-        return createReadOperationMono(() -> operations.estimatedDocumentCount(notNull("options", options)), null);
+        return createReadOperationMono(
+                (asyncOperations -> asyncOperations.createTimeoutSettings(options)),
+                () -> operations.estimatedDocumentCount(notNull("options", options)), null);
     }
 
     Publisher<Long> countDocuments(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) {
-        return createReadOperationMono(() -> operations.countDocuments(notNull("filter", filter), notNull("options", options)
+        return createReadOperationMono(
+                (asyncOperations -> asyncOperations.createTimeoutSettings(options)),
+                () -> operations.countDocuments(notNull("filter", filter), notNull("options", options)
         ), clientSession);
     }
 
     Publisher<BulkWriteResult> bulkWrite(
             @Nullable final ClientSession clientSession,
             final List<? extends WriteModel<? extends T>> requests, final BulkWriteOptions options) {
-        return createWriteOperationMono(() -> operations.bulkWrite(notNull("requests", requests), notNull("options", options)),
-                                        clientSession);
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.bulkWrite(notNull("requests", requests), notNull("options", options)), clientSession);
     }
 
     Publisher<InsertOneResult> insertOne(@Nullable final ClientSession clientSession, final T document, final InsertOneOptions options) {
@@ -267,8 +299,9 @@ Publisher<InsertOneResult> insertOne(@Nullable final ClientSession clientSession
     Publisher<InsertManyResult> insertMany(
             @Nullable final ClientSession clientSession, final List<? extends T> documents,
             final InsertManyOptions options) {
-        return createWriteOperationMono(() -> operations.insertMany(notNull("documents", documents), notNull("options", options)),
-                                        clientSession)
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.insertMany(notNull("documents", documents), notNull("options", options)), clientSession)
                 .map(INSERT_MANY_RESULT_MAPPER);
     }
 
@@ -335,15 +368,17 @@ Publisher<UpdateResult> updateMany(
     }
 
     Publisher<T> findOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) {
-        return createWriteOperationMono(() -> operations.findOneAndDelete(notNull("filter", filter),
-                                                                          notNull("options", options)),
-                                        clientSession);
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.findOneAndDelete(notNull("filter", filter), notNull("options", options)), clientSession);
     }
 
     Publisher<T> findOneAndReplace(
             @Nullable final ClientSession clientSession, final Bson filter, final T replacement,
             final FindOneAndReplaceOptions options) {
-        return createWriteOperationMono(() -> operations.findOneAndReplace(notNull("filter", filter),
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.findOneAndReplace(notNull("filter", filter),
                                                                            notNull("replacement", replacement),
                                                                            notNull("options", options)),
                                         clientSession);
@@ -352,7 +387,9 @@ Publisher<T> findOneAndReplace(
     Publisher<T> findOneAndUpdate(
             @Nullable final ClientSession clientSession, final Bson filter, final Bson update,
             final FindOneAndUpdateOptions options) {
-        return createWriteOperationMono(() -> operations.findOneAndUpdate(notNull("filter", filter),
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.findOneAndUpdate(notNull("filter", filter),
                                                                           notNull("update", update),
                                                                           notNull("options", options)),
                                         clientSession);
@@ -361,14 +398,18 @@ Publisher<T> findOneAndUpdate(
     Publisher<T> findOneAndUpdate(
             @Nullable final ClientSession clientSession, final Bson filter,
             final List<? extends Bson> update, final FindOneAndUpdateOptions options) {
-        return createWriteOperationMono(() -> operations.findOneAndUpdate(notNull("filter", filter),
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.findOneAndUpdate(notNull("filter", filter),
                                                                           notNull("update", update),
                                                                           notNull("options", options)),
                                         clientSession);
     }
 
     Publisher<Void> dropCollection(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) {
-        return createWriteOperationMono(() -> operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), clientSession);
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), clientSession);
     }
 
     Publisher<String> createIndex(@Nullable final ClientSession clientSession, final Bson key, final IndexOptions options) {
@@ -379,8 +420,9 @@ Publisher<String> createIndex(@Nullable final ClientSession clientSession, final
     Publisher<String> createIndexes(
             @Nullable final ClientSession clientSession, final List<IndexModel> indexes,
             final CreateIndexOptions options) {
-        return createWriteOperationMono(() -> operations.createIndexes(notNull("indexes", indexes),
-                                                                       notNull("options", options)), clientSession)
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.createIndexes(notNull("indexes", indexes), notNull("options", options)), clientSession)
                 .thenMany(Flux.fromIterable(IndexHelper.getIndexNames(indexes, getCodecRegistry())));
     }
 
@@ -392,27 +434,37 @@ Publisher<String> createSearchIndex(@Nullable final String indexName, final Bson
     }
 
     Publisher<String> createSearchIndexes(final List<SearchIndexModel> indexes) {
-        return createWriteOperationMono(() -> operations.createSearchIndexes(indexes), null)
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.createSearchIndexes(indexes), null)
                 .thenMany(Flux.fromIterable(IndexHelper.getSearchIndexNames(indexes)));
     }
 
 
     public Publisher<Void> updateSearchIndex(final String name, final Bson definition) {
-       return createWriteOperationMono(() -> operations.updateSearchIndex(name, definition), null);
+       return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.updateSearchIndex(name, definition), null);
     }
 
 
     public Publisher<Void> dropSearchIndex(final String indexName) {
-        return createWriteOperationMono(() -> operations.dropSearchIndex(indexName), null);
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.dropSearchIndex(indexName), null);
     }
 
     Publisher<Void> dropIndex(@Nullable final ClientSession clientSession, final String indexName, final DropIndexOptions options) {
-        return createWriteOperationMono(() -> operations.dropIndex(notNull("indexName", indexName), notNull("options", options)),
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.dropIndex(notNull("indexName", indexName), notNull("options", options)),
                                         clientSession);
     }
 
     Publisher<Void> dropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) {
-        return createWriteOperationMono(() -> operations.dropIndex(notNull("keys", keys), notNull("options", options)),
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.dropIndex(notNull("keys", keys), notNull("options", options)),
                                         clientSession);
     }
 
@@ -423,35 +475,45 @@ Publisher<Void> dropIndexes(@Nullable final ClientSession clientSession, final D
     Publisher<Void> renameCollection(
             @Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace,
             final RenameCollectionOptions options) {
-        return createWriteOperationMono(() -> operations.renameCollection(notNull("newCollectionNamespace", newCollectionNamespace),
+        return createWriteOperationMono(
+                operations::getTimeoutSettings,
+                () -> operations.renameCollection(notNull("newCollectionNamespace", newCollectionNamespace),
                                                                           notNull("options", options)),
                                         clientSession);
     }
 
-    <R> Mono<R> createReadOperationMono(
-            final Supplier<AsyncReadOperation<R>> operation,
-            @Nullable final ClientSession clientSession) {
-        return createReadOperationMono(operation, clientSession, getReadPreference());
+
+    <R> Mono<R> createReadOperationMono(final Function<AsyncOperations<?>, TimeoutSettings> timeoutSettingsFunction,
+            final Supplier<AsyncReadOperation<R>> operation, @Nullable final ClientSession clientSession) {
+        return createReadOperationMono(() -> timeoutSettingsFunction.apply(operations), operation, clientSession, getReadPreference());
     }
 
-    <R> Mono<R> createReadOperationMono(
-            final Supplier<AsyncReadOperation<R>> operation,
-            @Nullable final ClientSession clientSession,
+
+    <R> Mono<R> createReadOperationMono(final Supplier<TimeoutSettings> timeoutSettingsSupplier,
+            final Supplier<AsyncReadOperation<R>> operationSupplier, @Nullable final ClientSession clientSession,
             final ReadPreference readPreference) {
-        AsyncReadOperation<R> readOperation = operation.get();
-        return executor.execute(readOperation, readPreference, getReadConcern(), clientSession);
+        AsyncReadOperation<R> readOperation = operationSupplier.get();
+        return getExecutor(timeoutSettingsSupplier.get())
+                .execute(readOperation, readPreference, getReadConcern(), clientSession);
+    }
+
+    <R> Mono<R> createWriteOperationMono(final Function<AsyncOperations<?>, TimeoutSettings> timeoutSettingsFunction,
+            final Supplier<AsyncWriteOperation<R>> operationSupplier, @Nullable final ClientSession clientSession) {
+        return createWriteOperationMono(() -> timeoutSettingsFunction.apply(operations), operationSupplier, clientSession);
     }
 
-    <R> Mono<R> createWriteOperationMono(final Supplier<AsyncWriteOperation<R>> operation, @Nullable final ClientSession clientSession) {
-        AsyncWriteOperation<R> writeOperation = operation.get();
-        return executor.execute(writeOperation, getReadConcern(), clientSession);
+    <R> Mono<R> createWriteOperationMono(final Supplier<TimeoutSettings> timeoutSettingsSupplier,
+            final Supplier<AsyncWriteOperation<R>> operationSupplier, @Nullable final ClientSession clientSession) {
+        AsyncWriteOperation<R> writeOperation = operationSupplier.get();
+        return  getExecutor(timeoutSettingsSupplier.get())
+                .execute(writeOperation, getReadConcern(), clientSession);
     }
 
     private Mono<BulkWriteResult> createSingleWriteRequestMono(
             final Supplier<AsyncWriteOperation<BulkWriteResult>> operation,
             @Nullable final ClientSession clientSession,
             final WriteRequest.Type type) {
-        return createWriteOperationMono(operation, clientSession)
+        return createWriteOperationMono(operations::getTimeoutSettings, operation, clientSession)
                 .onErrorMap(MongoBulkWriteException.class, e -> {
                     MongoException exception;
                     WriteConcernError writeConcernError = e.getWriteConcernError();
@@ -482,6 +544,10 @@ private Mono<BulkWriteResult> createSingleWriteRequestMono(
                 });
     }
 
+    private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) {
+        return executor.withTimeoutSettings(timeoutSettings);
+    }
+
     private static final Function<BulkWriteResult, InsertOneResult> INSERT_ONE_RESULT_MAPPER = result -> {
         if (result.wasAcknowledged()) {
             BsonValue insertedId = result.getInserts().isEmpty() ? null : result.getInserts().get(0).getId();
@@ -526,6 +592,3 @@ public static <T> SingleResultCallback<T> sinkToCallback(final MonoSink<T> sink)
         };
     }
 }
-
-
-
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java
index 371168bedd8..dc165e5a5d4 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.internal.operation.AsyncWriteOperation;
 import com.mongodb.lang.Nullable;
@@ -52,4 +53,21 @@ <T> Mono<T> execute(AsyncReadOperation<T> operation, ReadPreference readPreferen
      * @param <T> the operations result type.
      */
     <T> Mono<T> execute(AsyncWriteOperation<T> operation, ReadConcern readConcern, @Nullable ClientSession session);
+
+    /**
+     * Create a new OperationExecutor with the specified timeout settings.
+     *
+     * @param timeoutSettings the timeout settings to use for the operations
+     * @return a new operation executor with the given timeout settings
+     * @since 5.2
+     */
+    OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings);
+
+    /**
+     * Returns the current timeout settings
+     *
+     * @return the timeout settings
+     * @since 5.2
+     */
+    TimeoutSettings getTimeoutSettings();
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java
index cb9c37bea8f..1c89ab81d34 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java
@@ -15,7 +15,6 @@
  */
 package com.mongodb.reactivestreams.client.internal;
 
-import com.mongodb.ContextProvider;
 import com.mongodb.MongoClientException;
 import com.mongodb.MongoException;
 import com.mongodb.MongoInternalException;
@@ -26,9 +25,12 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.RequestContext;
 import com.mongodb.internal.IgnorableRequestContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding;
 import com.mongodb.internal.binding.AsyncClusterBinding;
 import com.mongodb.internal.binding.AsyncReadWriteBinding;
+import com.mongodb.internal.connection.OperationContext;
+import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.internal.operation.AsyncWriteOperation;
 import com.mongodb.lang.Nullable;
@@ -39,10 +41,13 @@
 import org.reactivestreams.Subscriber;
 import reactor.core.publisher.Mono;
 
+import java.util.Objects;
+
 import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL;
 import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL;
 import static com.mongodb.ReadPreference.primary;
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutContext.createTimeoutContext;
 import static com.mongodb.reactivestreams.client.internal.MongoOperationPublisher.sinkToCallback;
 
 /**
@@ -52,17 +57,16 @@ public class OperationExecutorImpl implements OperationExecutor {
 
     private final MongoClientImpl mongoClient;
     private final ClientSessionHelper clientSessionHelper;
+    @Nullable
     private final ReactiveContextProvider contextProvider;
+    private final TimeoutSettings timeoutSettings;
 
-    OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper) {
+    OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper,
+            final TimeoutSettings timeoutSettings, @Nullable final ReactiveContextProvider contextProvider) {
         this.mongoClient = mongoClient;
         this.clientSessionHelper = clientSessionHelper;
-        ContextProvider contextProvider = mongoClient.getSettings().getContextProvider();
-        if (contextProvider != null && !(contextProvider instanceof ReactiveContextProvider)) {
-            throw new IllegalArgumentException("The contextProvider must be an instance of "
-                    + ReactiveContextProvider.class.getName() + " when using the Reactive Streams driver");
-        }
-        this.contextProvider = (ReactiveContextProvider) contextProvider;
+        this.timeoutSettings = timeoutSettings;
+        this.contextProvider = contextProvider;
     }
 
     @Override
@@ -78,10 +82,8 @@ public <T> Mono<T> execute(final AsyncReadOperation<T> operation, final ReadPref
 
         return Mono.from(subscriber ->
                 clientSessionHelper.withClientSession(session, this)
-                        .map(clientSession -> getReadWriteBinding(getContext(subscriber), readPreference, readConcern, clientSession,
-                                session == null && clientSession != null))
-                        .switchIfEmpty(Mono.fromCallable(() ->
-                                getReadWriteBinding(getContext(subscriber), readPreference, readConcern, session, false)))
+                        .map(clientSession -> getReadWriteBinding(getContext(subscriber),
+                                readPreference, readConcern, clientSession, session == null))
                         .flatMap(binding -> {
                             if (session != null && session.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) {
                                 binding.release();
@@ -114,10 +116,8 @@ public <T> Mono<T> execute(final AsyncWriteOperation<T> operation, final ReadCon
 
         return Mono.from(subscriber ->
                 clientSessionHelper.withClientSession(session, this)
-                        .map(clientSession -> getReadWriteBinding(getContext(subscriber), primary(), readConcern,
-                                clientSession, session == null && clientSession != null))
-                        .switchIfEmpty(Mono.fromCallable(() ->
-                                getReadWriteBinding(getContext(subscriber), primary(), readConcern, session, false)))
+                        .map(clientSession -> getReadWriteBinding(getContext(subscriber),
+                                primary(), readConcern, clientSession, session == null))
                         .flatMap(binding ->
                                 Mono.<T>create(sink -> operation.executeAsync(binding, (result, t) -> {
                                     try {
@@ -133,6 +133,19 @@ public <T> Mono<T> execute(final AsyncWriteOperation<T> operation, final ReadCon
         );
     }
 
+    @Override
+    public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) {
+        if (Objects.equals(timeoutSettings, newTimeoutSettings)) {
+            return this;
+        }
+        return new OperationExecutorImpl(mongoClient, clientSessionHelper, newTimeoutSettings, contextProvider);
+    }
+
+    @Override
+    public TimeoutSettings getTimeoutSettings() {
+        return timeoutSettings;
+    }
+
     private <T> RequestContext getContext(final Subscriber<T> subscriber) {
         RequestContext context = null;
         if (contextProvider != null) {
@@ -158,11 +171,14 @@ private void unpinServerAddressOnTransientTransactionError(@Nullable final Clien
         }
     }
 
-    private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestContext, final ReadPreference readPreference,
-            final ReadConcern readConcern, @Nullable final ClientSession session, final boolean ownsSession) {
+    private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestContext,
+            final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session,
+            final boolean ownsSession) {
         notNull("readPreference", readPreference);
         AsyncClusterAwareReadWriteBinding readWriteBinding = new AsyncClusterBinding(mongoClient.getCluster(),
-            getReadPreferenceForBinding(readPreference, session), readConcern, mongoClient.getSettings().getServerApi(), requestContext);
+                getReadPreferenceForBinding(readPreference, session), readConcern,
+                getOperationContext(requestContext, session, readConcern));
+
         Crypt crypt = mongoClient.getCrypt();
         if (crypt != null) {
             readWriteBinding = new CryptBinding(readWriteBinding, crypt);
@@ -176,6 +192,15 @@ private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestCo
         }
     }
 
+    private OperationContext getOperationContext(final RequestContext requestContext, final ClientSession session,
+            final ReadConcern readConcern) {
+        return new OperationContext(
+                requestContext,
+                new ReadConcernAwareNoOpSessionContext(readConcern),
+                createTimeoutContext(session, timeoutSettings),
+                mongoClient.getSettings().getServerApi());
+    }
+
     private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) {
         if (session == null) {
             return readPreference;
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java
new file mode 100644
index 00000000000..bc4da3026a9
--- /dev/null
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
+import com.mongodb.reactivestreams.client.MongoCollection;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import reactor.core.publisher.Mono;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+public final class TimeoutHelper {
+    private static final String DEFAULT_TIMEOUT_MESSAGE = "Operation exceeded the timeout limit.";
+
+    private TimeoutHelper() {
+        //NOP
+    }
+
+    public static <T> MongoCollection<T> collectionWithTimeout(final MongoCollection<T> collection,
+                                                               @Nullable final Timeout timeout) {
+        return collectionWithTimeout(collection, timeout, DEFAULT_TIMEOUT_MESSAGE);
+    }
+
+    public static <T> MongoCollection<T> collectionWithTimeout(final MongoCollection<T> collection,
+                                                               @Nullable final Timeout timeout,
+                                                               final String message) {
+        if (timeout != null) {
+            return timeout.call(MILLISECONDS,
+                    () -> collection.withTimeout(0, MILLISECONDS),
+                    ms -> collection.withTimeout(ms, MILLISECONDS),
+                    () -> TimeoutContext.throwMongoTimeoutException(message));
+        }
+        return collection;
+    }
+
+    public static <T> Mono<MongoCollection<T>> collectionWithTimeoutMono(final MongoCollection<T> collection,
+                                                                         @Nullable final Timeout timeout) {
+        try {
+            return Mono.just(collectionWithTimeout(collection, timeout));
+        } catch (MongoOperationTimeoutException e) {
+            return Mono.error(e);
+        }
+    }
+
+    public static <T> Mono<MongoCollection<T>> collectionWithTimeoutDeferred(final MongoCollection<T> collection,
+                                                                             @Nullable final Timeout timeout) {
+        return Mono.defer(() -> collectionWithTimeoutMono(collection, timeout));
+    }
+
+
+    public static MongoDatabase databaseWithTimeout(final MongoDatabase database,
+                                                    @Nullable final Timeout timeout) {
+        return databaseWithTimeout(database, DEFAULT_TIMEOUT_MESSAGE, timeout);
+    }
+
+    public static MongoDatabase databaseWithTimeout(final MongoDatabase database,
+                                                    final String message,
+                                                    @Nullable final Timeout timeout) {
+        if (timeout != null) {
+            return timeout.call(MILLISECONDS,
+                    () -> database.withTimeout(0, MILLISECONDS),
+                    ms -> database.withTimeout(ms, MILLISECONDS),
+                    () -> TimeoutContext.throwMongoTimeoutException(message));
+        }
+        return database;
+    }
+
+    private static Mono<MongoDatabase> databaseWithTimeoutMono(final MongoDatabase database,
+                                                               final String message,
+                                                               @Nullable final Timeout timeout) {
+        try {
+            return Mono.just(databaseWithTimeout(database, message, timeout));
+        } catch (MongoOperationTimeoutException e) {
+            return Mono.error(e);
+        }
+    }
+
+    public static Mono<MongoDatabase> databaseWithTimeoutDeferred(final MongoDatabase database,
+                                                                  @Nullable final Timeout timeout) {
+        return databaseWithTimeoutDeferred(database, DEFAULT_TIMEOUT_MESSAGE, timeout);
+    }
+
+    public static Mono<MongoDatabase> databaseWithTimeoutDeferred(final MongoDatabase database,
+                                                                  final String message,
+                                                                  @Nullable final Timeout timeout) {
+        return Mono.defer(() -> databaseWithTimeoutMono(database, message, timeout));
+    }
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java
index 2a4b976c0dc..08df35c00f0 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java
@@ -16,21 +16,27 @@
 
 package com.mongodb.reactivestreams.client.internal.crypt;
 
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.MongoClient;
 import org.bson.BsonDocument;
 import reactor.core.publisher.Mono;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred;
 
 class CollectionInfoRetriever {
 
+    private static final String TIMEOUT_ERROR_MESSAGE = "Collection information retrieval exceeded the timeout limit.";
+
     private final MongoClient client;
 
     CollectionInfoRetriever(final MongoClient client) {
         this.client = notNull("client", client);
     }
 
-    public Mono<BsonDocument> filter(final String databaseName, final BsonDocument filter) {
-        return Mono.from(client.getDatabase(databaseName).listCollections(BsonDocument.class).filter(filter).first());
+    public Mono<BsonDocument> filter(final String databaseName, final BsonDocument filter, @Nullable final Timeout operationTimeout) {
+        return databaseWithTimeoutDeferred(client.getDatabase(databaseName), TIMEOUT_ERROR_MESSAGE, operationTimeout)
+                .flatMap(database -> Mono.from(database.listCollections(BsonDocument.class).filter(filter).first()));
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java
index d1c218cdfe9..0d15f5c970d 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java
@@ -19,12 +19,15 @@
 import com.mongodb.AutoEncryptionSettings;
 import com.mongodb.MongoClientException;
 import com.mongodb.MongoException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.crypt.capi.MongoCrypt;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.MongoClient;
 import com.mongodb.reactivestreams.client.MongoClients;
+import com.mongodb.reactivestreams.client.MongoDatabase;
 import org.bson.RawBsonDocument;
 import reactor.core.publisher.Mono;
 
@@ -36,9 +39,11 @@
 import static com.mongodb.internal.capi.MongoCryptHelper.createProcessBuilder;
 import static com.mongodb.internal.capi.MongoCryptHelper.isMongocryptdSpawningDisabled;
 import static com.mongodb.internal.capi.MongoCryptHelper.startProcess;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred;
 
 @SuppressWarnings("UseOfProcessBuilder")
 class CommandMarker implements Closeable {
+    private static final String TIMEOUT_ERROR_MESSAGE = "Command marker exceeded the timeout limit.";
     @Nullable
     private final MongoClient client;
     @Nullable
@@ -58,7 +63,6 @@ class CommandMarker implements Closeable {
      *  <li>The extraOptions.cryptSharedLibRequired option is false.</li>
      * </ul>
      *  Then mongocryptd MUST be spawned by the driver.
-     * </p>
      */
     CommandMarker(
             final MongoCrypt mongoCrypt,
@@ -80,14 +84,14 @@ class CommandMarker implements Closeable {
         }
     }
 
-    Mono<RawBsonDocument> mark(final String databaseName, final RawBsonDocument command) {
+    Mono<RawBsonDocument> mark(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) {
         if (client != null) {
-            return runCommand(databaseName, command)
+            return runCommand(databaseName, command, operationTimeout)
                     .onErrorResume(Throwable.class, e -> {
-                        if (processBuilder == null) {
+                        if (processBuilder == null || e instanceof MongoOperationTimeoutException) {
                             throw MongoException.fromThrowable(e);
                         }
-                        return Mono.fromRunnable(() -> startProcess(processBuilder)).then(runCommand(databaseName, command));
+                        return Mono.fromRunnable(() -> startProcess(processBuilder)).then(runCommand(databaseName, command, operationTimeout));
                     })
                     .onErrorMap(t -> new MongoClientException("Exception in encryption library: " + t.getMessage(), t));
         } else {
@@ -95,12 +99,14 @@ Mono<RawBsonDocument> mark(final String databaseName, final RawBsonDocument comm
         }
     }
 
-    private Mono<RawBsonDocument> runCommand(final String databaseName, final RawBsonDocument command) {
+    private Mono<RawBsonDocument> runCommand(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) {
         assertNotNull(client);
-        return Mono.from(client.getDatabase(databaseName)
-                                 .withReadConcern(ReadConcern.DEFAULT)
-                                 .withReadPreference(ReadPreference.primary())
-                                 .runCommand(command, RawBsonDocument.class));
+        MongoDatabase mongoDatabase = client.getDatabase(databaseName)
+                .withReadConcern(ReadConcern.DEFAULT)
+                .withReadPreference(ReadPreference.primary());
+
+        return databaseWithTimeoutDeferred(mongoDatabase, TIMEOUT_ERROR_MESSAGE, operationTimeout)
+                .flatMap(database -> Mono.from(database.runCommand(command, RawBsonDocument.class)));
     }
 
     @Override
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java
index e34b0571665..6d5aca27457 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java
@@ -20,6 +20,7 @@
 import com.mongodb.MongoException;
 import com.mongodb.MongoInternalException;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.model.vault.DataKeyOptions;
 import com.mongodb.client.model.vault.EncryptOptions;
 import com.mongodb.client.model.vault.RewrapManyDataKeyOptions;
@@ -32,6 +33,7 @@
 import com.mongodb.internal.capi.MongoCryptHelper;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.MongoClient;
 import org.bson.BsonBinary;
@@ -128,14 +130,14 @@ public class Crypt implements Closeable {
      * @param databaseName the namespace
      * @param command      the unencrypted command
      */
-    public Mono<RawBsonDocument> encrypt(final String databaseName, final RawBsonDocument command) {
+    public Mono<RawBsonDocument> encrypt(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) {
         notNull("databaseName", databaseName);
         notNull("command", command);
 
         if (bypassAutoEncryption) {
             return Mono.fromCallable(() -> command);
         }
-        return executeStateMachine(() -> mongoCrypt.createEncryptionContext(databaseName, command), databaseName);
+        return executeStateMachine(() -> mongoCrypt.createEncryptionContext(databaseName, command), databaseName, operationTimeout);
     }
 
     /**
@@ -143,9 +145,10 @@ public Mono<RawBsonDocument> encrypt(final String databaseName, final RawBsonDoc
      *
      * @param commandResponse the encrypted command response
      */
-    public Mono<RawBsonDocument> decrypt(final RawBsonDocument commandResponse) {
+    public Mono<RawBsonDocument> decrypt(final RawBsonDocument commandResponse, @Nullable final Timeout operationTimeout) {
         notNull("commandResponse", commandResponse);
-        return executeStateMachine(() -> mongoCrypt.createDecryptionContext(commandResponse)).onErrorMap(this::wrapInClientException);
+        return executeStateMachine(() -> mongoCrypt.createDecryptionContext(commandResponse), operationTimeout)
+                .onErrorMap(this::wrapInClientException);
     }
 
     /**
@@ -154,7 +157,7 @@ public Mono<RawBsonDocument> decrypt(final RawBsonDocument commandResponse) {
      * @param kmsProvider the KMS provider to create the data key for
      * @param options     the data key options
      */
-    public Mono<RawBsonDocument> createDataKey(final String kmsProvider, final DataKeyOptions options) {
+    public Mono<RawBsonDocument> createDataKey(final String kmsProvider, final DataKeyOptions options, @Nullable final Timeout operationTimeout) {
         notNull("kmsProvider", kmsProvider);
         notNull("options", options);
         return executeStateMachine(() ->
@@ -163,7 +166,7 @@ public Mono<RawBsonDocument> createDataKey(final String kmsProvider, final DataK
                                                     .keyAltNames(options.getKeyAltNames())
                                                     .masterKey(options.getMasterKey())
                                                     .keyMaterial(options.getKeyMaterial())
-                                                    .build()));
+                                                    .build()), operationTimeout);
     }
 
     /**
@@ -172,13 +175,11 @@ public Mono<RawBsonDocument> createDataKey(final String kmsProvider, final DataK
      * @param value   the value to encrypt
      * @param options the options
      */
-    public Mono<BsonBinary> encryptExplicitly(final BsonValue value, final EncryptOptions options) {
-        notNull("value", value);
-        notNull("options", options);
-
+    public Mono<BsonBinary> encryptExplicitly(final BsonValue value, final EncryptOptions options, @Nullable final Timeout operationTimeout) {
         return executeStateMachine(() ->
-            mongoCrypt.createExplicitEncryptionContext(new BsonDocument("v", value), asMongoExplicitEncryptOptions(options))
-        ).map(result -> result.getBinary("v"));
+                mongoCrypt.createExplicitEncryptionContext(new BsonDocument("v", value), asMongoExplicitEncryptOptions(options)),
+                operationTimeout)
+                .map(result -> result.getBinary("v"));
     }
 
     /**
@@ -190,10 +191,10 @@ public Mono<BsonBinary> encryptExplicitly(final BsonValue value, final EncryptOp
      * @since 4.9
      * @mongodb.server.release 6.2
      */
-    @Beta(Beta.Reason.SERVER)
-    public Mono<BsonDocument> encryptExpression(final BsonDocument expression, final EncryptOptions options) {
+    @Beta(Reason.SERVER)
+    public Mono<BsonDocument> encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout operationTimeout) {
         return executeStateMachine(() ->
-                mongoCrypt.createEncryptExpressionContext(new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options))
+                mongoCrypt.createEncryptExpressionContext(new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options)), operationTimeout
         ).map(result -> result.getDocument("v"));
     }
 
@@ -202,9 +203,8 @@ public Mono<BsonDocument> encryptExpression(final BsonDocument expression, final
      *
      * @param value the encrypted value
      */
-    public Mono<BsonValue> decryptExplicitly(final BsonBinary value) {
-        notNull("value", value);
-        return executeStateMachine(() -> mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value)))
+    public Mono<BsonValue> decryptExplicitly(final BsonBinary value, @Nullable final Timeout operationTimeout) {
+        return executeStateMachine(() -> mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value)), operationTimeout)
                 .map(result -> result.get("v"));
     }
 
@@ -214,14 +214,14 @@ public Mono<BsonValue> decryptExplicitly(final BsonBinary value) {
      * @param options the rewrap many data key options
      * @return the decrypted value
      */
-    public Mono<RawBsonDocument> rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options) {
+    public Mono<RawBsonDocument> rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options, @Nullable final Timeout operationTimeout) {
         return executeStateMachine(() ->
                 mongoCrypt.createRewrapManyDatakeyContext(filter,
                         MongoRewrapManyDataKeyOptions
                                 .builder()
                                 .provider(options.getProvider())
                                 .masterKey(options.getMasterKey())
-                                .build())
+                                .build()), operationTimeout
         );
     }
 
@@ -240,15 +240,16 @@ public void close() {
         }
     }
 
-    private Mono<RawBsonDocument> executeStateMachine(final Supplier<MongoCryptContext> cryptContextSupplier) {
-        return executeStateMachine(cryptContextSupplier, null);
+    private Mono<RawBsonDocument> executeStateMachine(final Supplier<MongoCryptContext> cryptContextSupplier,
+                                                      @Nullable final Timeout operationTimeout) {
+        return executeStateMachine(cryptContextSupplier, null, operationTimeout);
     }
 
     private Mono<RawBsonDocument> executeStateMachine(final Supplier<MongoCryptContext> cryptContextSupplier,
-                                                      @Nullable final String databaseName) {
+                                                      @Nullable final String databaseName, @Nullable final Timeout operationTimeout) {
         try {
             MongoCryptContext cryptContext = cryptContextSupplier.get();
-            return Mono.<RawBsonDocument>create(sink -> executeStateMachineWithSink(cryptContext, databaseName, sink))
+            return Mono.<RawBsonDocument>create(sink -> executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout))
                     .onErrorMap(this::wrapInClientException)
                     .doFinally(s -> cryptContext.close());
         } catch (MongoCryptException e) {
@@ -257,23 +258,23 @@ private Mono<RawBsonDocument> executeStateMachine(final Supplier<MongoCryptConte
     }
 
     private void executeStateMachineWithSink(final MongoCryptContext cryptContext, @Nullable final String databaseName,
-            final MonoSink<RawBsonDocument> sink) {
+            final MonoSink<RawBsonDocument> sink, @Nullable final Timeout operationTimeout) {
         State state = cryptContext.getState();
         switch (state) {
             case NEED_MONGO_COLLINFO:
-                collInfo(cryptContext, databaseName, sink);
+                collInfo(cryptContext, databaseName, sink, operationTimeout);
                 break;
             case NEED_MONGO_MARKINGS:
-                mark(cryptContext, databaseName, sink);
+                mark(cryptContext, databaseName, sink, operationTimeout);
                 break;
             case NEED_KMS_CREDENTIALS:
-                fetchCredentials(cryptContext, databaseName, sink);
+                fetchCredentials(cryptContext, databaseName, sink, operationTimeout);
                 break;
             case NEED_MONGO_KEYS:
-                fetchKeys(cryptContext, databaseName, sink);
+                fetchKeys(cryptContext, databaseName, sink, operationTimeout);
                 break;
             case NEED_KMS:
-                decryptKeys(cryptContext, databaseName, sink);
+                decryptKeys(cryptContext, databaseName, sink, operationTimeout);
                 break;
             case READY:
                 sink.success(cryptContext.finish());
@@ -287,10 +288,10 @@ private void executeStateMachineWithSink(final MongoCryptContext cryptContext, @
     }
 
     private void fetchCredentials(final MongoCryptContext cryptContext, @Nullable final String databaseName,
-            final MonoSink<RawBsonDocument> sink) {
+            final MonoSink<RawBsonDocument> sink, @Nullable final Timeout operationTimeout) {
         try {
             cryptContext.provideKmsProviderCredentials(MongoCryptHelper.fetchCredentials(kmsProviders, kmsProviderPropertySuppliers));
-            executeStateMachineWithSink(cryptContext, databaseName, sink);
+            executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout);
         } catch (Exception e) {
             sink.error(e);
         }
@@ -298,19 +299,19 @@ private void fetchCredentials(final MongoCryptContext cryptContext, @Nullable fi
 
     private void collInfo(final MongoCryptContext cryptContext,
                           @Nullable final String databaseName,
-                          final MonoSink<RawBsonDocument> sink) {
+                          final MonoSink<RawBsonDocument> sink, @Nullable final Timeout operationTimeout) {
         if (collectionInfoRetriever == null) {
             sink.error(new IllegalStateException("Missing collection Info retriever"));
         } else if (databaseName == null) {
             sink.error(new IllegalStateException("Missing database name"));
         } else {
-            collectionInfoRetriever.filter(databaseName, cryptContext.getMongoOperation())
+            collectionInfoRetriever.filter(databaseName, cryptContext.getMongoOperation(), operationTimeout)
                     .doOnSuccess(result -> {
                         if (result != null) {
                             cryptContext.addMongoOperationResult(result);
                         }
                         cryptContext.completeMongoOperation();
-                        executeStateMachineWithSink(cryptContext, databaseName, sink);
+                        executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout);
                     })
                     .doOnError(t -> sink.error(MongoException.fromThrowableNonNull(t)))
                     .subscribe();
@@ -319,17 +320,18 @@ private void collInfo(final MongoCryptContext cryptContext,
 
     private void mark(final MongoCryptContext cryptContext,
                       @Nullable final String databaseName,
-                      final MonoSink<RawBsonDocument> sink) {
+                      final MonoSink<RawBsonDocument> sink,
+                      @Nullable final Timeout operationTimeout) {
         if (commandMarker == null) {
             sink.error(wrapInClientException(new MongoInternalException("Missing command marker")));
         } else if (databaseName == null) {
             sink.error(wrapInClientException(new IllegalStateException("Missing database name")));
         } else {
-            commandMarker.mark(databaseName, cryptContext.getMongoOperation())
+            commandMarker.mark(databaseName, cryptContext.getMongoOperation(), operationTimeout)
                     .doOnSuccess(result -> {
                         cryptContext.addMongoOperationResult(result);
                         cryptContext.completeMongoOperation();
-                        executeStateMachineWithSink(cryptContext, databaseName, sink);
+                        executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout);
                     })
                     .doOnError(e -> sink.error(wrapInClientException(e)))
                     .subscribe();
@@ -338,14 +340,15 @@ private void mark(final MongoCryptContext cryptContext,
 
     private void fetchKeys(final MongoCryptContext cryptContext,
                            @Nullable final String databaseName,
-                           final MonoSink<RawBsonDocument> sink) {
-        keyRetriever.find(cryptContext.getMongoOperation())
+                           final MonoSink<RawBsonDocument> sink,
+                           @Nullable final Timeout operationTimeout) {
+        keyRetriever.find(cryptContext.getMongoOperation(), operationTimeout)
                 .doOnSuccess(results -> {
                     for (BsonDocument result : results) {
                         cryptContext.addMongoOperationResult(result);
                     }
                     cryptContext.completeMongoOperation();
-                    executeStateMachineWithSink(cryptContext, databaseName, sink);
+                    executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout);
                 })
                 .doOnError(t -> sink.error(MongoException.fromThrowableNonNull(t)))
                 .subscribe();
@@ -353,16 +356,17 @@ private void fetchKeys(final MongoCryptContext cryptContext,
 
     private void decryptKeys(final MongoCryptContext cryptContext,
                              @Nullable final String databaseName,
-                             final MonoSink<RawBsonDocument> sink) {
+                             final MonoSink<RawBsonDocument> sink,
+                             @Nullable final Timeout operationTimeout) {
         MongoKeyDecryptor keyDecryptor = cryptContext.nextKeyDecryptor();
         if (keyDecryptor != null) {
-            keyManagementService.decryptKey(keyDecryptor)
-                    .doOnSuccess(r -> decryptKeys(cryptContext, databaseName, sink))
+            keyManagementService.decryptKey(keyDecryptor, operationTimeout)
+                    .doOnSuccess(r -> decryptKeys(cryptContext, databaseName, sink, operationTimeout))
                     .doOnError(e -> sink.error(wrapInClientException(e)))
                     .subscribe();
         } else {
             Mono.fromRunnable(cryptContext::completeKeyDecryptors)
-                    .doOnSuccess(r -> executeStateMachineWithSink(cryptContext, databaseName, sink))
+                    .doOnSuccess(r -> executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout))
                     .doOnError(e -> sink.error(wrapInClientException(e)))
                     .subscribe();
         }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java
index ae100283ab8..1dcc8a07d62 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java
@@ -17,17 +17,13 @@
 package com.mongodb.reactivestreams.client.internal.crypt;
 
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
 import com.mongodb.ServerAddress;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.async.SingleResultCallback;
 import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding;
 import com.mongodb.internal.binding.AsyncConnectionSource;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
@@ -58,22 +54,6 @@ public void getWriteConnectionSource(final SingleResultCallback<AsyncConnectionS
         });
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return wrapped.getSessionContext();
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return wrapped.getServerApi();
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return wrapped.getRequestContext();
-    }
-
     @Override
     public OperationContext getOperationContext() {
         return wrapped.getOperationContext();
@@ -90,7 +70,6 @@ public void getReadConnectionSource(final SingleResultCallback<AsyncConnectionSo
         });
     }
 
-
     @Override
     public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference,
             final SingleResultCallback<AsyncConnectionSource> callback) {
@@ -144,22 +123,6 @@ public ServerDescription getServerDescription() {
             return wrapped.getServerDescription();
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return wrapped.getSessionContext();
-        }
-
-        @Override
-        @Nullable
-        public ServerApi getServerApi() {
-            return wrapped.getServerApi();
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return wrapped.getRequestContext();
-        }
-
         @Override
         public OperationContext getOperationContext() {
             return wrapped.getOperationContext();
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java
index 276ad0be146..f7466c14828 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java
@@ -20,12 +20,13 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.async.SingleResultCallback;
-import com.mongodb.internal.binding.BindingContext;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.Connection;
 import com.mongodb.internal.connection.MessageSettings;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.SplittablePayload;
 import com.mongodb.internal.connection.SplittablePayloadBsonWriter;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.internal.validator.MappedFieldNameValidator;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinaryReader;
@@ -90,16 +91,17 @@ public ConnectionDescription getDescription() {
     @Override
     public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
                                  @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-                                 final BindingContext context, final SingleResultCallback<T> callback) {
+                                 final OperationContext operationContext, final SingleResultCallback<T> callback) {
         commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder,
-                context, true, null, null, callback);
+                operationContext, true, null, null, callback);
     }
 
     @Override
     public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
                                  @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-            final BindingContext context, final boolean responseExpected, @Nullable final SplittablePayload payload,
-                                 @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback<T> callback) {
+                                 final OperationContext operationContext, final boolean responseExpected,
+                                 @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator,
+                                 final SingleResultCallback<T> callback) {
 
         if (serverIsLessThanVersionFourDotTwo(wrapped.getDescription())) {
             callback.onResult(null, new MongoClientException("Auto-encryption requires a minimum MongoDB version of 4.2"));
@@ -116,12 +118,14 @@ public <T> void commandAsync(final String database, final BsonDocument command,
                     : new SplittablePayloadBsonWriter(bsonBinaryWriter, bsonOutput, createSplittablePayloadMessageSettings(), payload,
                                                       MAX_SPLITTABLE_DOCUMENT_SIZE);
 
+            Timeout operationTimeout = operationContext.getTimeoutContext().getTimeout();
+
             getEncoder(command).encode(writer, command, EncoderContext.builder().build());
-            crypt.encrypt(database, new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()))
+            crypt.encrypt(database, new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout)
                     .flatMap((Function<RawBsonDocument, Mono<RawBsonDocument>>) encryptedCommand ->
                             Mono.create(sink -> wrapped.commandAsync(database, encryptedCommand, commandFieldNameValidator, readPreference,
-                                    new RawBsonDocumentCodec(), context, responseExpected, null, null, sinkToCallback(sink))))
-                    .flatMap(crypt::decrypt)
+                                    new RawBsonDocumentCodec(), operationContext, responseExpected, null, null, sinkToCallback(sink))))
+                    .flatMap(rawBsonDocument -> crypt.decrypt(rawBsonDocument, operationTimeout))
                     .map(decryptedResponse ->
                         commandResultDecoder.decode(new BsonBinaryReader(decryptedResponse.getByteBuffer().asNIO()),
                                                     DecoderContext.builder().build())
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java
index 0e493f8c364..d59b1e03696 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java
@@ -25,7 +25,6 @@
 import com.mongodb.crypt.capi.MongoCrypts;
 import com.mongodb.reactivestreams.client.MongoClient;
 import com.mongodb.reactivestreams.client.MongoClients;
-import com.mongodb.reactivestreams.client.internal.MongoClientImpl;
 
 import javax.net.ssl.SSLContext;
 import java.security.NoSuchAlgorithmException;
@@ -41,11 +40,11 @@ public final class Crypts {
     private Crypts() {
     }
 
-    public static Crypt createCrypt(final MongoClientImpl client, final AutoEncryptionSettings settings) {
+    public static Crypt createCrypt(final MongoClientSettings mongoClientSettings, final AutoEncryptionSettings autoEncryptionSettings) {
         MongoClient sharedInternalClient = null;
-        MongoClientSettings keyVaultMongoClientSettings = settings.getKeyVaultMongoClientSettings();
-        if (keyVaultMongoClientSettings == null || !settings.isBypassAutoEncryption()) {
-            MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(client.getSettings())
+        MongoClientSettings keyVaultMongoClientSettings = autoEncryptionSettings.getKeyVaultMongoClientSettings();
+        if (keyVaultMongoClientSettings == null || !autoEncryptionSettings.isBypassAutoEncryption()) {
+            MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(mongoClientSettings)
                     .applyToConnectionPoolSettings(builder -> builder.minSize(0))
                     .autoEncryptionSettings(null)
                     .build();
@@ -53,16 +52,16 @@ public static Crypt createCrypt(final MongoClientImpl client, final AutoEncrypti
         }
         MongoClient keyVaultClient = keyVaultMongoClientSettings == null
                 ? sharedInternalClient : MongoClients.create(keyVaultMongoClientSettings);
-        MongoCrypt mongoCrypt = MongoCrypts.create(createMongoCryptOptions(settings));
+        MongoCrypt mongoCrypt = MongoCrypts.create(createMongoCryptOptions(autoEncryptionSettings));
         return new Crypt(
                 mongoCrypt,
-                createKeyRetriever(keyVaultClient, settings.getKeyVaultNamespace()),
-                createKeyManagementService(settings.getKmsProviderSslContextMap()),
-                settings.getKmsProviders(),
-                settings.getKmsProviderPropertySuppliers(),
-                settings.isBypassAutoEncryption(),
-                settings.isBypassAutoEncryption() ? null : new CollectionInfoRetriever(sharedInternalClient),
-                new CommandMarker(mongoCrypt, settings),
+                createKeyRetriever(keyVaultClient, autoEncryptionSettings.getKeyVaultNamespace()),
+                createKeyManagementService(autoEncryptionSettings.getKmsProviderSslContextMap()),
+                autoEncryptionSettings.getKmsProviders(),
+                autoEncryptionSettings.getKmsProviderPropertySuppliers(),
+                autoEncryptionSettings.isBypassAutoEncryption(),
+                autoEncryptionSettings.isBypassAutoEncryption() ? null : new CollectionInfoRetriever(sharedInternalClient),
+                new CommandMarker(mongoCrypt, autoEncryptionSettings),
                 sharedInternalClient,
                 keyVaultClient);
     }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java
index 887129b24e1..465ffc02e80 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java
@@ -16,41 +16,53 @@
 
 package com.mongodb.reactivestreams.client.internal.crypt;
 
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoSocketException;
+import com.mongodb.MongoSocketReadTimeoutException;
+import com.mongodb.MongoSocketWriteTimeoutException;
 import com.mongodb.ServerAddress;
 import com.mongodb.connection.AsyncCompletionHandler;
 import com.mongodb.connection.SocketSettings;
 import com.mongodb.connection.SslSettings;
 import com.mongodb.crypt.capi.MongoKeyDecryptor;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.connection.AsynchronousChannelStream;
 import com.mongodb.internal.connection.DefaultInetAddressResolver;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.Stream;
 import com.mongodb.internal.connection.StreamFactory;
 import com.mongodb.internal.connection.TlsChannelStreamFactoryFactory;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.ByteBuf;
 import org.bson.ByteBufNIO;
+import org.jetbrains.annotations.NotNull;
 import reactor.core.publisher.Mono;
 import reactor.core.publisher.MonoSink;
 
 import javax.net.ssl.SSLContext;
 import java.io.Closeable;
 import java.nio.channels.CompletionHandler;
+import java.nio.channels.InterruptedByTimeoutException;
 import java.util.List;
 import java.util.Map;
 
 import static java.util.Collections.singletonList;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.bson.assertions.Assertions.assertTrue;
 
 class KeyManagementService implements Closeable {
     private static final Logger LOGGER = Loggers.getLogger("client");
+    private static final String TIMEOUT_ERROR_MESSAGE = "KMS key decryption exceeded the timeout limit.";
     private final Map<String, SSLContext> kmsProviderSslContextMap;
     private final int timeoutMillis;
     private final TlsChannelStreamFactoryFactory tlsChannelStreamFactoryFactory;
 
     KeyManagementService(final Map<String, SSLContext> kmsProviderSslContextMap, final int timeoutMillis) {
+        assertTrue("timeoutMillis > 0", timeoutMillis > 0);
         this.kmsProviderSslContextMap = kmsProviderSslContextMap;
         this.tlsChannelStreamFactoryFactory = new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver());
         this.timeoutMillis = timeoutMillis;
@@ -60,7 +72,7 @@ public void close() {
         tlsChannelStreamFactoryFactory.close();
     }
 
-    Mono<Void> decryptKey(final MongoKeyDecryptor keyDecryptor) {
+    Mono<Void> decryptKey(final MongoKeyDecryptor keyDecryptor, @Nullable final Timeout operationTimeout) {
         SocketSettings socketSettings = SocketSettings.builder()
                 .connectTimeout(timeoutMillis, MILLISECONDS)
                 .readTimeout(timeoutMillis, MILLISECONDS)
@@ -74,43 +86,47 @@ Mono<Void> decryptKey(final MongoKeyDecryptor keyDecryptor) {
 
         return Mono.<Void>create(sink -> {
             Stream stream = streamFactory.create(serverAddress);
-            stream.openAsync(new AsyncCompletionHandler<Void>() {
+            OperationContext operationContext = createOperationContext(operationTimeout, socketSettings);
+            stream.openAsync(operationContext, new AsyncCompletionHandler<Void>() {
                 @Override
                 public void completed(@Nullable final Void ignored) {
-                    streamWrite(stream, keyDecryptor, sink);
+                    streamWrite(stream, keyDecryptor, operationContext, sink);
                 }
 
                 @Override
                 public void failed(final Throwable t) {
                     stream.close();
-                    sink.error(t);
+                    handleError(t, operationContext, sink);
                 }
             });
         }).onErrorMap(this::unWrapException);
     }
 
-    private void streamWrite(final Stream stream, final MongoKeyDecryptor keyDecryptor, final MonoSink<Void> sink) {
+    private void streamWrite(final Stream stream, final MongoKeyDecryptor keyDecryptor,
+                             final OperationContext operationContext, final MonoSink<Void> sink) {
         List<ByteBuf> byteBufs = singletonList(new ByteBufNIO(keyDecryptor.getMessage()));
-        stream.writeAsync(byteBufs, new AsyncCompletionHandler<Void>() {
+        stream.writeAsync(byteBufs, operationContext, new AsyncCompletionHandler<Void>() {
             @Override
             public void completed(@Nullable final Void aVoid) {
-                streamRead(stream, keyDecryptor, sink);
+                streamRead(stream, keyDecryptor, operationContext, sink);
             }
 
             @Override
             public void failed(final Throwable t) {
                 stream.close();
-                sink.error(t);
+                handleError(t, operationContext, sink);
             }
         });
     }
 
-    private void streamRead(final Stream stream, final MongoKeyDecryptor keyDecryptor, final MonoSink<Void> sink) {
+    private void streamRead(final Stream stream, final MongoKeyDecryptor keyDecryptor,
+                            final OperationContext operationContext, final MonoSink<Void> sink) {
         int bytesNeeded = keyDecryptor.bytesNeeded();
         if (bytesNeeded > 0) {
             AsynchronousChannelStream asyncStream = (AsynchronousChannelStream) stream;
             ByteBuf buffer = asyncStream.getBuffer(bytesNeeded);
-            asyncStream.getChannel().read(buffer.asNIO(), asyncStream.getSettings().getReadTimeout(MILLISECONDS), MILLISECONDS, null,
+            long readTimeoutMS = operationContext.getTimeoutContext().getReadTimeoutMS();
+            asyncStream.getChannel().read(buffer.asNIO(), readTimeoutMS, MILLISECONDS, null,
                                           new CompletionHandler<Integer, Void>() {
 
                                               @Override
@@ -119,7 +135,7 @@ public void completed(final Integer integer, final Void aVoid) {
                                                   try {
                                                       keyDecryptor.feed(buffer.asNIO());
                                                       buffer.release();
-                                                      streamRead(stream, keyDecryptor, sink);
+                                                      streamRead(stream, keyDecryptor, operationContext, sink);
                                                   } catch (Throwable t) {
                                                       sink.error(t);
                                                   }
@@ -129,7 +145,7 @@ public void completed(final Integer integer, final Void aVoid) {
                                               public void failed(final Throwable t, final Void aVoid) {
                                                   buffer.release();
                                                   stream.close();
-                                                  sink.error(t);
+                                                  handleError(t, operationContext, sink);
                                               }
                                           });
         } else {
@@ -138,7 +154,49 @@ public void failed(final Throwable t, final Void aVoid) {
         }
     }
 
+    private static void handleError(final Throwable t, final OperationContext operationContext, final MonoSink<Void> sink) {
+        if (isTimeoutException(t) && operationContext.getTimeoutContext().hasTimeoutMS()) {
+            sink.error(TimeoutContext.createMongoTimeoutException(TIMEOUT_ERROR_MESSAGE, t));
+        } else {
+            sink.error(t);
+        }
+    }
+
+    private OperationContext createOperationContext(@Nullable final Timeout operationTimeout, final SocketSettings socketSettings) {
+        TimeoutSettings timeoutSettings;
+        if (operationTimeout == null) {
+            timeoutSettings = createTimeoutSettings(socketSettings, null);
+        } else {
+            timeoutSettings = operationTimeout.call(MILLISECONDS,
+                    () -> {
+                        throw new AssertionError("operationTimeout cannot be infinite");
+                    },
+                    (ms) -> createTimeoutSettings(socketSettings, ms),
+                    () -> {
+                        throw new MongoOperationTimeoutException(TIMEOUT_ERROR_MESSAGE);
+                    });
+        }
+        return OperationContext.simpleOperationContext(new TimeoutContext(timeoutSettings));
+    }
+
+    @NotNull
+    private static TimeoutSettings createTimeoutSettings(final SocketSettings socketSettings,
+            @Nullable final Long ms) {
+        return new TimeoutSettings(
+                0,
+                socketSettings.getConnectTimeout(MILLISECONDS),
+                socketSettings.getReadTimeout(MILLISECONDS),
+                ms,
+                0);
+    }
+
     private Throwable unWrapException(final Throwable t) {
         return t instanceof MongoSocketException ? t.getCause() : t;
     }
+
+    private static boolean isTimeoutException(final Throwable t) {
+        return t instanceof MongoSocketReadTimeoutException
+                || t instanceof MongoSocketWriteTimeoutException
+                || t instanceof InterruptedByTimeoutException;
+    }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java
index 74dca9e6f60..23e3a06eff0 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java
@@ -18,7 +18,10 @@
 
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadConcern;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.MongoClient;
+import com.mongodb.reactivestreams.client.MongoCollection;
 import org.bson.BsonDocument;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -26,8 +29,10 @@
 import java.util.List;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout;
 
 class KeyRetriever {
+    private static final String TIMEOUT_ERROR_MESSAGE = "Key retrieval exceeded the timeout limit.";
     private final MongoClient client;
     private final MongoNamespace namespace;
 
@@ -36,11 +41,14 @@ class KeyRetriever {
         this.namespace = notNull("namespace", namespace);
     }
 
-    public Mono<List<BsonDocument>> find(final BsonDocument keyFilter) {
-        return Flux.from(
-                client.getDatabase(namespace.getDatabaseName()).getCollection(namespace.getCollectionName(), BsonDocument.class)
-                        .withReadConcern(ReadConcern.MAJORITY)
-                        .find(keyFilter)
-        ).collectList();
+    public Mono<List<BsonDocument>> find(final BsonDocument keyFilter, @Nullable final Timeout operationTimeout) {
+        return Flux.defer(() -> {
+            MongoCollection<BsonDocument> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName(), BsonDocument.class);
+
+            return collectionWithTimeout(collection, operationTimeout, TIMEOUT_ERROR_MESSAGE)
+                    .withReadConcern(ReadConcern.MAJORITY)
+                    .find(keyFilter);
+        }).collectList();
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java
index d92f68154dc..1e81db2045e 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java
@@ -22,6 +22,7 @@
 import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
 import com.mongodb.client.gridfs.model.GridFSFile;
 import com.mongodb.client.gridfs.model.GridFSUploadOptions;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.reactivestreams.client.ClientSession;
 import com.mongodb.reactivestreams.client.MongoClients;
 import com.mongodb.reactivestreams.client.MongoCollection;
@@ -39,6 +40,8 @@
 import org.reactivestreams.Publisher;
 
 import java.nio.ByteBuffer;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createDeletePublisher;
@@ -47,6 +50,7 @@
 import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createGridFSFindPublisher;
 import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createGridFSUploadPublisher;
 import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createRenamePublisher;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
 
 
@@ -72,7 +76,7 @@ public GridFSBucketImpl(final MongoDatabase database, final String bucketName) {
              getChunksCollection(database, bucketName));
     }
 
-    GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection<GridFSFile> filesCollection,
+    private GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection<GridFSFile> filesCollection,
                      final MongoCollection<Document> chunksCollection) {
         this.bucketName = notNull("bucketName", bucketName);
         this.chunkSizeBytes = chunkSizeBytes;
@@ -115,6 +119,12 @@ public ReadConcern getReadConcern() {
         return filesCollection.getReadConcern();
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        Long timeoutMS = filesCollection.getTimeout(MILLISECONDS);
+        return timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS);
+    }
+
     @Override
     public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) {
         return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection, chunksCollection);
@@ -141,6 +151,12 @@ public GridFSBucket withReadConcern(final ReadConcern readConcern) {
                                     chunksCollection.withReadConcern(readConcern));
     }
 
+    @Override
+    public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withTimeout(timeout, timeUnit),
+                chunksCollection.withTimeout(timeout, timeUnit));
+    }
+
     @Override
     public GridFSUploadPublisher<ObjectId> uploadFromPublisher(final String filename, final Publisher<ByteBuffer> source) {
         return uploadFromPublisher(filename, source, new GridFSUploadOptions());
@@ -202,8 +218,10 @@ public GridFSDownloadPublisher downloadToPublisher(final ObjectId id) {
 
     @Override
     public GridFSDownloadPublisher downloadToPublisher(final BsonValue id) {
-        return createGridFSDownloadPublisher(chunksCollection, null,
-                                             createGridFSFindPublisher(filesCollection, null, new BsonDocument("_id", id)));
+
+        Function<Timeout, GridFSFindPublisher> findPublisherCreator =
+                operationTimeout -> createGridFSFindPublisher(filesCollection, null, new BsonDocument("_id", id), operationTimeout);
+        return createGridFSDownloadPublisher(chunksCollection, null, findPublisherCreator);
     }
 
     @Override
@@ -213,8 +231,9 @@ public GridFSDownloadPublisher downloadToPublisher(final String filename) {
 
     @Override
     public GridFSDownloadPublisher downloadToPublisher(final String filename, final GridFSDownloadOptions options) {
-        return createGridFSDownloadPublisher(chunksCollection, null,
-                                             createGridFSFindPublisher(filesCollection, null, filename, options));
+        Function<Timeout, GridFSFindPublisher> findPublisherCreator =
+                operationTimeout -> createGridFSFindPublisher(filesCollection, null, filename, options, operationTimeout);
+        return createGridFSDownloadPublisher(chunksCollection, null, findPublisherCreator);
     }
 
     @Override
@@ -224,8 +243,9 @@ public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSes
 
     @Override
     public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession, final BsonValue id) {
-        return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession),
-                                             createGridFSFindPublisher(filesCollection, clientSession, new BsonDocument("_id", id)));
+        Function<Timeout, GridFSFindPublisher> findPublisherCreator =
+                operationTimeout -> createGridFSFindPublisher(filesCollection, clientSession, new BsonDocument("_id", id), operationTimeout);
+        return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), findPublisherCreator);
     }
 
     @Override
@@ -237,8 +257,11 @@ public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSes
     public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession,
                                                        final String filename,
                                                        final GridFSDownloadOptions options) {
-        return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession),
-                                             createGridFSFindPublisher(filesCollection, clientSession, filename, options));
+        Function<Timeout, GridFSFindPublisher> findPublisherCreator =
+                operationTimeout -> createGridFSFindPublisher(filesCollection, clientSession, filename,
+                        options, operationTimeout);
+
+        return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), findPublisherCreator);
     }
 
     @Override
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java
index e80d5dc3902..bedc6552957 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java
@@ -18,11 +18,13 @@
 
 import com.mongodb.MongoGridFSException;
 import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
 import com.mongodb.reactivestreams.client.FindPublisher;
 import com.mongodb.reactivestreams.client.MongoCollection;
 import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher;
+import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher;
 import org.bson.Document;
 import org.bson.types.Binary;
 import org.reactivestreams.Publisher;
@@ -35,30 +37,32 @@
 import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutContext.startTimeout;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout;
 import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public class GridFSDownloadPublisherImpl implements GridFSDownloadPublisher {
+    private static final String TIMEOUT_ERROR_MESSAGE = "Finding chunks exceeded the timeout limit.";
     private final ClientSession clientSession;
-    private final Mono<GridFSFile> gridFSFileMono;
+    private final Function<Timeout, GridFSFindPublisher> gridFSFileMono;
     private final MongoCollection<Document> chunksCollection;
     private Integer bufferSizeBytes;
 
     private volatile GridFSFile fileInfo;
+    @Nullable
+    private final Long timeoutMs;
 
     public GridFSDownloadPublisherImpl(@Nullable final ClientSession clientSession,
-                                       final Mono<GridFSFile> gridFSFileMono,
+                                       final Function<Timeout, GridFSFindPublisher> gridFSFilePublisherCreator,
                                        final MongoCollection<Document> chunksCollection) {
         this.clientSession = clientSession;
-        this.gridFSFileMono = notNull("gridFSFileMono", gridFSFileMono)
-                .doOnSuccess(s -> {
-                    if (s == null) {
-                        throw new MongoGridFSException("File not found");
-                    }
-                });
+        this.gridFSFileMono = notNull("gridFSFilePublisherCreator", gridFSFilePublisherCreator);
         this.chunksCollection = notNull("chunksCollection", chunksCollection);
+        this.timeoutMs = chunksCollection.getTimeout(MILLISECONDS);
     }
 
     @Override
@@ -66,7 +70,8 @@ public Publisher<GridFSFile> getGridFSFile() {
         if (fileInfo != null) {
             return Mono.fromCallable(() -> fileInfo);
         }
-        return gridFSFileMono.doOnNext(i -> fileInfo = i);
+        return Mono.from(gridFSFileMono.apply(startTimeout(timeoutMs)))
+                .doOnNext(gridFSFile -> fileInfo = gridFSFile);
     }
 
     @Override
@@ -77,17 +82,25 @@ public GridFSDownloadPublisher bufferSizeBytes(final int bufferSizeBytes) {
 
     @Override
     public void subscribe(final Subscriber<? super ByteBuffer> subscriber) {
-        gridFSFileMono.flatMapMany((Function<GridFSFile, Flux<ByteBuffer>>) this::getChunkPublisher)
-                .subscribe(subscriber);
+        Flux.defer(() -> {
+            Timeout operationTimeout = startTimeout(timeoutMs);
+            return Mono.from(gridFSFileMono.apply(operationTimeout))
+                    .doOnSuccess(gridFSFile -> {
+                        if (gridFSFile == null) {
+                            throw new MongoGridFSException("File not found");
+                        }
+                        fileInfo = gridFSFile;
+                    }).flatMapMany((Function<GridFSFile, Flux<ByteBuffer>>) gridFSFile -> getChunkPublisher(gridFSFile, operationTimeout));
+        }).subscribe(subscriber);
     }
 
-    private Flux<ByteBuffer> getChunkPublisher(final GridFSFile gridFSFile) {
+    private Flux<ByteBuffer> getChunkPublisher(final GridFSFile gridFSFile, @Nullable final Timeout timeout) {
         Document filter = new Document("files_id", gridFSFile.getId());
         FindPublisher<Document> chunkPublisher;
         if (clientSession != null) {
-            chunkPublisher = chunksCollection.find(clientSession, filter);
+            chunkPublisher = collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).find(clientSession, filter);
         } else {
-            chunkPublisher = chunksCollection.find(filter);
+            chunkPublisher = collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).find(filter);
         }
 
         AtomicInteger chunkCounter = new AtomicInteger(0);
@@ -126,5 +139,4 @@ private Flux<ByteBuffer> getChunkPublisher(final GridFSFile gridFSFile) {
                 });
         return bufferSizeBytes == null ? byteBufferFlux : new ResizingByteBufferFlux(byteBufferFlux, bufferSizeBytes);
     }
-
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java
index 4b2878d72e3..166abca6a0b 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java
@@ -17,11 +17,13 @@
 package com.mongodb.reactivestreams.client.internal.gridfs;
 
 import com.mongodb.MongoGridFSException;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
 import com.mongodb.client.gridfs.model.GridFSFile;
 import com.mongodb.client.gridfs.model.GridFSUploadOptions;
-import com.mongodb.client.result.DeleteResult;
 import com.mongodb.client.result.UpdateResult;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
 import com.mongodb.reactivestreams.client.FindPublisher;
@@ -36,9 +38,14 @@
 import reactor.core.publisher.Mono;
 
 import java.nio.ByteBuffer;
+import java.util.function.Function;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutMono;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred;
 import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
@@ -68,10 +75,10 @@ public static GridFSUploadPublisherImpl createGridFSUploadPublisher(
     public static GridFSDownloadPublisherImpl createGridFSDownloadPublisher(
             final MongoCollection<Document> chunksCollection,
             @Nullable final ClientSession clientSession,
-            final GridFSFindPublisher publisher) {
+            final Function<Timeout, GridFSFindPublisher> publisher) {
         notNull("chunksCollection", chunksCollection);
-        notNull("publisher", publisher);
-        return new GridFSDownloadPublisherImpl(clientSession, Mono.from(publisher), chunksCollection);
+        notNull("publisher", publisher);
+        return new GridFSDownloadPublisherImpl(clientSession, publisher, chunksCollection);
     }
 
     public static GridFSFindPublisher createGridFSFindPublisher(
@@ -82,11 +89,21 @@ public static GridFSFindPublisher createGridFSFindPublisher(
         return new GridFSFindPublisherImpl(createFindPublisher(filesCollection, clientSession, filter));
     }
 
+    public static GridFSFindPublisher createGridFSFindPublisher(
+            final MongoCollection<GridFSFile> filesCollection,
+            @Nullable final ClientSession clientSession,
+            @Nullable final Bson filter,
+            @Nullable final Timeout operationTimeout) {
+        notNull("filesCollection", filesCollection);
+        return new GridFSFindPublisherImpl(createFindPublisher(filesCollection, clientSession, filter, operationTimeout));
+    }
+
     public static GridFSFindPublisher createGridFSFindPublisher(
             final MongoCollection<GridFSFile> filesCollection,
             @Nullable final ClientSession clientSession,
             final String filename,
-            final GridFSDownloadOptions options) {
+            final GridFSDownloadOptions options,
+            @Nullable final Timeout operationTimeout) {
         notNull("filesCollection", filesCollection);
         notNull("filename", filename);
         notNull("options", options);
@@ -102,10 +119,32 @@ public static GridFSFindPublisher createGridFSFindPublisher(
             sort = -1;
         }
 
-        return createGridFSFindPublisher(filesCollection, clientSession, new Document("filename", filename)).skip(skip)
+        return createGridFSFindPublisher(filesCollection, clientSession, new Document("filename", filename), operationTimeout).skip(skip)
                 .sort(new Document("uploadDate", sort));
     }
 
+    public static FindPublisher<GridFSFile> createFindPublisher(
+            final MongoCollection<GridFSFile> filesCollection,
+            @Nullable final ClientSession clientSession,
+            @Nullable final Bson filter,
+            @Nullable final Timeout operationTimeout) {
+        notNull("filesCollection", filesCollection);
+        FindPublisher<GridFSFile> publisher;
+        if (clientSession == null) {
+            publisher = collectionWithTimeout(filesCollection, operationTimeout).find();
+        } else {
+            publisher = collectionWithTimeout(filesCollection, operationTimeout).find(clientSession);
+        }
+
+        if (filter != null) {
+            publisher = publisher.filter(filter);
+        }
+        if (operationTimeout != null) {
+            publisher = publisher.timeoutMode(TimeoutMode.CURSOR_LIFETIME);
+        }
+        return publisher;
+    }
+
     public static FindPublisher<GridFSFile> createFindPublisher(
             final MongoCollection<GridFSFile> filesCollection,
             @Nullable final ClientSession clientSession,
@@ -117,10 +156,12 @@ public static FindPublisher<GridFSFile> createFindPublisher(
         } else {
             publisher = filesCollection.find(clientSession);
         }
-
         if (filter != null) {
             publisher = publisher.filter(filter);
         }
+        if (filesCollection.getTimeout(MILLISECONDS) != null) {
+            publisher.timeoutMode(TimeoutMode.CURSOR_LIFETIME);
+        }
         return publisher;
     }
 
@@ -132,24 +173,29 @@ public static Publisher<Void> createDeletePublisher(final MongoCollection<GridFS
         notNull("chunksCollection", chunksCollection);
         notNull("id", id);
         BsonDocument filter = new BsonDocument("_id", id);
-        Publisher<DeleteResult> fileDeletePublisher;
-        if (clientSession == null) {
-            fileDeletePublisher = filesCollection.deleteOne(filter);
-        } else {
-            fileDeletePublisher = filesCollection.deleteOne(clientSession, filter);
-        }
-        return Mono.from(fileDeletePublisher)
-                .flatMap(deleteResult -> {
+
+        return Mono.defer(()-> {
+            Timeout operationTimeout = startTimeout(filesCollection.getTimeout(MILLISECONDS));
+            return collectionWithTimeoutMono(filesCollection, operationTimeout)
+                    .flatMap(wrappedCollection -> {
+                    if (clientSession == null) {
+                        return Mono.from(wrappedCollection.deleteOne(filter));
+                    } else {
+                        return Mono.from(wrappedCollection.deleteOne(clientSession, filter));
+                    }
+                }).flatMap(deleteResult -> {
                     if (deleteResult.wasAcknowledged() && deleteResult.getDeletedCount() == 0) {
-                        throw new MongoGridFSException(format("No file found with the ObjectId: %s", id));
+                        return Mono.error(new MongoGridFSException(format("No file found with the ObjectId: %s", id)));
                     }
+                    return collectionWithTimeoutMono(chunksCollection, operationTimeout);
+                }).flatMap(wrappedCollection -> {
                     if (clientSession == null) {
-                        return Mono.from(chunksCollection.deleteMany(new BsonDocument("files_id", id)));
+                        return Mono.from(wrappedCollection.deleteMany(new BsonDocument("files_id", id)));
                     } else {
-                        return Mono.from(chunksCollection.deleteMany(clientSession, new BsonDocument("files_id", id)));
+                        return Mono.from(wrappedCollection.deleteMany(clientSession, new BsonDocument("files_id", id)));
                     }
-                })
-                .flatMap(i -> Mono.empty());
+                }).then();
+            });
     }
 
     public static Publisher<Void> createRenamePublisher(final MongoCollection<GridFSFile> filesCollection,
@@ -180,20 +226,30 @@ public static Publisher<Void> createRenamePublisher(final MongoCollection<GridFS
     public static Publisher<Void> createDropPublisher(final MongoCollection<GridFSFile> filesCollection,
                                                       final MongoCollection<Document> chunksCollection,
                                                       @Nullable final ClientSession clientSession) {
-        Publisher<Void> filesDropPublisher;
-        if (clientSession == null) {
-            filesDropPublisher = filesCollection.drop();
-        } else {
-            filesDropPublisher = filesCollection.drop(clientSession);
-        }
 
-        Publisher<Void> chunksDropPublisher;
-        if (clientSession == null) {
-            chunksDropPublisher = chunksCollection.drop();
-        } else {
-            chunksDropPublisher = chunksCollection.drop(clientSession);
-        }
+        return Mono.defer(() -> {
+            Timeout operationTimeout = startTimeout(filesCollection.getTimeout(MILLISECONDS));
+            return collectionWithTimeoutMono(filesCollection, operationTimeout)
+                    .flatMap(wrappedCollection -> {
+                        if (clientSession == null) {
+                            return Mono.from(wrappedCollection.drop());
+                        } else {
+                            return Mono.from(wrappedCollection.drop(clientSession));
+                        }
+                    }).then(collectionWithTimeoutDeferred(chunksCollection, operationTimeout))
+                    .flatMap(wrappedCollection -> {
+                        if (clientSession == null) {
+                            return Mono.from(wrappedCollection.drop());
+                        } else {
+                            return Mono.from(wrappedCollection.drop(clientSession));
+                        }
+
+                    });
+        });
+    }
 
-        return Mono.from(filesDropPublisher).then(Mono.from(chunksDropPublisher));
+    @Nullable
+    private static Timeout startTimeout(@Nullable final Long timeoutMs) {
+        return timeoutMs == null ? null : TimeoutContext.startTimeout(timeoutMs);
     }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java
index da6cbdcbce8..a45d369c676 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java
@@ -17,13 +17,14 @@
 package com.mongodb.reactivestreams.client.internal.gridfs;
 
 import com.mongodb.MongoGridFSException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.client.gridfs.model.GridFSFile;
 import com.mongodb.client.result.DeleteResult;
 import com.mongodb.client.result.InsertOneResult;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ClientSession;
-import com.mongodb.reactivestreams.client.FindPublisher;
-import com.mongodb.reactivestreams.client.ListIndexesPublisher;
 import com.mongodb.reactivestreams.client.MongoCollection;
 import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher;
 import org.bson.BsonValue;
@@ -41,11 +42,14 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Consumer;
 import java.util.function.Function;
 
 import static com.mongodb.ReadPreference.primary;
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred;
+import static java.time.Duration.ofMillis;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 
 /**
@@ -53,6 +57,7 @@
  */
 public final class GridFSUploadPublisherImpl implements GridFSUploadPublisher<Void> {
 
+    private static final String TIMEOUT_ERROR_MESSAGE = "Saving chunks exceeded the timeout limit.";
     private static final Document PROJECTION = new Document("_id", 1);
     private static final Document FILES_INDEX = new Document("filename", 1).append("uploadDate", 1);
     private static final Document CHUNKS_INDEX = new Document("files_id", 1).append("n", 1);
@@ -64,6 +69,8 @@ public final class GridFSUploadPublisherImpl implements GridFSUploadPublisher<Vo
     private final int chunkSizeBytes;
     private final Document metadata;
     private final Publisher<ByteBuffer> source;
+    @Nullable
+    private final Long timeoutMs;
 
     public GridFSUploadPublisherImpl(@Nullable final ClientSession clientSession,
                                      final MongoCollection<GridFSFile> filesCollection,
@@ -81,6 +88,7 @@ public GridFSUploadPublisherImpl(@Nullable final ClientSession clientSession,
         this.chunkSizeBytes = chunkSizeBytes;
         this.metadata = metadata;
         this.source = source;
+        this.timeoutMs = filesCollection.getTimeout(MILLISECONDS);
     }
 
     @Override
@@ -98,31 +106,23 @@ public BsonValue getId() {
 
     @Override
     public void subscribe(final Subscriber<? super Void> s) {
-        Mono.<Void>create(sink -> {
+        Mono.defer(() -> {
             AtomicBoolean terminated = new AtomicBoolean(false);
-            sink.onCancel(() -> createCancellationMono(terminated).subscribe());
-
-            Consumer<Throwable> errorHandler = e -> createCancellationMono(terminated)
-                    .doOnError(i -> sink.error(e))
-                    .doOnSuccess(i -> sink.error(e))
-                    .subscribe();
-
-            Consumer<Long> saveFileDataMono = l -> createSaveFileDataMono(terminated, l)
-                    .doOnError(errorHandler)
-                    .doOnSuccess(i -> sink.success())
-                    .subscribe();
-
-            Consumer<Void> saveChunksMono = i -> createSaveChunksMono(terminated)
-                    .doOnError(errorHandler)
-                    .doOnSuccess(saveFileDataMono)
-                    .subscribe();
-
-            createCheckAndCreateIndexesMono()
-                    .doOnError(errorHandler)
-                    .doOnSuccess(saveChunksMono)
-                    .subscribe();
-        })
-       .subscribe(s);
+            Timeout timeout = TimeoutContext.startTimeout(timeoutMs);
+            return createCheckAndCreateIndexesMono(timeout)
+                    .then(createSaveChunksMono(terminated, timeout))
+                    .flatMap(lengthInBytes -> createSaveFileDataMono(terminated, lengthInBytes, timeout))
+                    .onErrorResume(originalError ->
+                            createCancellationMono(terminated, timeout)
+                                    .onErrorMap(cancellationError -> {
+                                        // A timeout exception may also occur during cancellation; it is suppressed under the original error.
+                                        originalError.addSuppressed(cancellationError);
+                                        return originalError;
+                                    })
+                                    .then(Mono.error(originalError)))
+                    .doOnCancel(() -> createCancellationMono(terminated, timeout).subscribe())
+                    .then();
+        }).subscribe(s);
     }
 
     public GridFSUploadPublisher<ObjectId> withObjectId() {
@@ -148,47 +148,50 @@ public void subscribe(final Subscriber<? super ObjectId> subscriber) {
         };
     }
 
-    private Mono<Void> createCheckAndCreateIndexesMono() {
-        MongoCollection<Document> collection = filesCollection.withDocumentClass(Document.class).withReadPreference(primary());
-        FindPublisher<Document> findPublisher;
-        if (clientSession != null) {
-            findPublisher = collection.find(clientSession);
-        } else {
-            findPublisher = collection.find();
-        }
+    private Mono<Void> createCheckAndCreateIndexesMono(@Nullable final Timeout timeout) {
         AtomicBoolean collectionExists = new AtomicBoolean(false);
-
-        return Mono.create(sink -> Mono.from(findPublisher.projection(PROJECTION).first())
-                .subscribe(
+        return Mono.create(sink -> findAllInCollection(filesCollection, timeout).subscribe(
                         d -> collectionExists.set(true),
                         sink::error,
                         () -> {
                             if (collectionExists.get()) {
                                 sink.success();
                             } else {
-                                checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX)
-                                        .doOnError(sink::error)
-                                        .doOnSuccess(i -> {
-                                            checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX)
-                                                    .doOnError(sink::error)
-                                                    .doOnSuccess(sink::success)
-                                                    .subscribe();
-                                        })
-                                        .subscribe();
+                                checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX, timeout)
+                                        .doOnSuccess(i -> checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX, timeout)
+                                                .subscribe(unused -> {}, sink::error, sink::success))
+                                        .subscribe(unused -> {}, sink::error);
                             }
                         })
         );
     }
 
-    private <T> Mono<Boolean> hasIndex(final MongoCollection<T> collection, final Document index) {
-        ListIndexesPublisher<Document> listIndexesPublisher;
-        if (clientSession != null) {
-            listIndexesPublisher = collection.listIndexes(clientSession);
-        } else {
-            listIndexesPublisher = collection.listIndexes();
-        }
+    private Mono<Document> findAllInCollection(final MongoCollection<GridFSFile> collection, @Nullable final Timeout timeout) {
+        return collectionWithTimeoutDeferred(collection
+                .withDocumentClass(Document.class)
+                .withReadPreference(primary()), timeout)
+                .flatMap(wrappedCollection -> {
+                    if (clientSession != null) {
+                        return Mono.from(wrappedCollection.find(clientSession)
+                                .projection(PROJECTION)
+                                .first());
+                    } else {
+                        return Mono.from(wrappedCollection.find()
+                                .projection(PROJECTION)
+                                .first());
+                    }
+                });
+    }
 
-        return Flux.from(listIndexesPublisher)
+    private <T> Mono<Boolean> hasIndex(final MongoCollection<T> collection, final Document index, @Nullable final Timeout timeout) {
+        return collectionWithTimeoutDeferred(collection, timeout)
+                .map(wrappedCollection -> {
+                    if (clientSession != null) {
+                        return wrappedCollection.listIndexes(clientSession);
+                    } else {
+                        return wrappedCollection.listIndexes();
+                    }
+                }).flatMapMany(Flux::from)
                 .collectList()
                 .map(indexes -> {
                     boolean hasIndex = false;
@@ -208,25 +211,28 @@ private <T> Mono<Boolean> hasIndex(final MongoCollection<T> collection, final Do
                 });
     }
 
-    private <T> Mono<Void> checkAndCreateIndex(final MongoCollection<T> collection, final Document index) {
-        return hasIndex(collection, index).flatMap(hasIndex -> {
+    private <T> Mono<Void> checkAndCreateIndex(final MongoCollection<T> collection, final Document index, @Nullable final Timeout timeout) {
+        return hasIndex(collection, index, timeout).flatMap(hasIndex -> {
             if (!hasIndex) {
-                return createIndexMono(collection, index).flatMap(s -> Mono.empty());
+                return createIndexMono(collection, index, timeout).flatMap(s -> Mono.empty());
             } else {
                 return Mono.empty();
             }
         });
     }
 
-    private <T> Mono<String> createIndexMono(final MongoCollection<T> collection, final Document index) {
-        return Mono.from(clientSession == null ? collection.createIndex(index) : collection.createIndex(clientSession, index));
+    private <T> Mono<String> createIndexMono(final MongoCollection<T> collection, final Document index, @Nullable final Timeout timeout) {
+        return collectionWithTimeoutDeferred(collection, timeout).flatMap(wrappedCollection ->
+             Mono.from(clientSession == null ? wrappedCollection.createIndex(index) : wrappedCollection.createIndex(clientSession, index))
+        );
     }
 
-    private Mono<Long> createSaveChunksMono(final AtomicBoolean terminated) {
+    private Mono<Long> createSaveChunksMono(final AtomicBoolean terminated, @Nullable final Timeout timeout) {
         return Mono.create(sink -> {
             AtomicLong lengthInBytes = new AtomicLong(0);
             AtomicInteger chunkIndex = new AtomicInteger(0);
             new ResizingByteBufferFlux(source, chunkSizeBytes)
+                    .takeUntilOther(createMonoTimer(timeout))
                     .flatMap((Function<ByteBuffer, Publisher<InsertOneResult>>) byteBuffer -> {
                         if (terminated.get()) {
                             return Mono.empty();
@@ -246,36 +252,64 @@ private Mono<Long> createSaveChunksMono(final AtomicBoolean terminated) {
                                 .append("n", chunkIndex.getAndIncrement())
                                 .append("data", data);
 
-                        return clientSession == null ? chunksCollection.insertOne(chunkDocument)
-                                : chunksCollection.insertOne(clientSession, chunkDocument);
+                        if (clientSession == null) {
+                            return collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(chunkDocument);
+                        } else {
+                            return collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(clientSession,
+                                    chunkDocument);
+                        }
+
                     })
                     .subscribe(null, sink::error, () -> sink.success(lengthInBytes.get()));
         });
     }
 
-    private Mono<InsertOneResult> createSaveFileDataMono(final AtomicBoolean terminated, final long lengthInBytes) {
+    /**
+     * Creates a Mono that emits a {@link MongoOperationTimeoutException} when the specified timeout expires.
+     *
+     * @param timeout the remaining timeout; {@code null} is treated as an infinite timeout.
+     * @return a Mono that emits a {@link MongoOperationTimeoutException} on expiry, or never emits if the timeout is infinite.
+     */
+    private static Mono<MongoOperationTimeoutException> createMonoTimer(final @Nullable Timeout timeout) {
+        return Timeout.nullAsInfinite(timeout).call(MILLISECONDS,
+                () -> Mono.never(),
+                (ms) -> Mono.delay(ofMillis(ms)).then(createTimeoutMonoError()),
+                () -> createTimeoutMonoError());
+    }
+
+    private static Mono<MongoOperationTimeoutException> createTimeoutMonoError() {
+        return Mono.error(TimeoutContext.createMongoTimeoutException(
+                "GridFS waiting for data from the source Publisher exceeded the timeout limit."));
+    }
+
+    private Mono<InsertOneResult> createSaveFileDataMono(final AtomicBoolean terminated,
+                                                         final long lengthInBytes,
+                                                         @Nullable final Timeout timeout) {
+        Mono<MongoCollection<GridFSFile>> filesCollectionMono = collectionWithTimeoutDeferred(filesCollection, timeout);
         if (terminated.compareAndSet(false, true)) {
             GridFSFile gridFSFile = new GridFSFile(fileId, filename, lengthInBytes, chunkSizeBytes, new Date(), metadata);
             if (clientSession != null) {
-                return Mono.from(filesCollection.insertOne(clientSession, gridFSFile));
+                return filesCollectionMono.flatMap(collection -> Mono.from(collection.insertOne(clientSession, gridFSFile)));
             } else {
-                return Mono.from(filesCollection.insertOne(gridFSFile));
+                return filesCollectionMono.flatMap(collection -> Mono.from(collection.insertOne(gridFSFile)));
             }
         } else {
             return Mono.empty();
         }
     }
 
-    private Mono<DeleteResult> createCancellationMono(final AtomicBoolean terminated) {
+    private Mono<DeleteResult> createCancellationMono(final AtomicBoolean terminated, @Nullable final Timeout timeout) {
+        Mono<MongoCollection<Document>> chunksCollectionMono = collectionWithTimeoutDeferred(chunksCollection, timeout);
         if (terminated.compareAndSet(false, true)) {
             if (clientSession != null) {
-                return Mono.from(chunksCollection.deleteMany(clientSession, new Document("files_id", fileId)));
+                return chunksCollectionMono.flatMap(collection -> Mono.from(collection
+                        .deleteMany(clientSession, new Document("files_id", fileId))));
             } else {
-                return Mono.from(chunksCollection.deleteMany(new Document("files_id", fileId)));
+                return chunksCollectionMono.flatMap(collection -> Mono.from(collection
+                        .deleteMany(new Document("files_id", fileId))));
             }
         } else {
             return Mono.empty();
         }
     }
-
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java
index b6c3cb73c61..5ae7f4815e5 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java
@@ -32,7 +32,10 @@
 import com.mongodb.client.model.vault.RewrapManyDataKeyOptions;
 import com.mongodb.client.model.vault.RewrapManyDataKeyResult;
 import com.mongodb.client.result.DeleteResult;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.VisibleForTesting;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.FindPublisher;
 import com.mongodb.reactivestreams.client.MongoClient;
 import com.mongodb.reactivestreams.client.MongoClients;
@@ -61,15 +64,22 @@
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
 import static com.mongodb.internal.capi.MongoCryptHelper.validateRewrapManyDataKeyOptions;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeout;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred;
 import static java.lang.String.format;
 import static java.util.Arrays.asList;
 import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.bson.internal.BsonUtil.mutableDeepCopy;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
  */
 public class ClientEncryptionImpl implements ClientEncryption {
+    private static final String TIMEOUT_ERROR_MESSAGE_CREATE_DATA_KEY = "Creating data key exceeded the timeout limit.";
+    private static final String TIMEOUT_ERROR_MESSAGE_REWRAP_DATA_KEY = "Rewrapping data key exceeded the timeout limit.";
+    private static final String TIMEOUT_ERROR_MESSAGE_CREATE_COLLECTION = "Encryption collection creation exceeded the timeout limit.";
     private final Crypt crypt;
     private final ClientEncryptionSettings options;
     private final MongoClient keyVaultClient;
@@ -85,10 +95,22 @@ public ClientEncryptionImpl(final MongoClient keyVaultClient, final ClientEncryp
         this.crypt = Crypts.create(keyVaultClient, options);
         this.options = options;
         MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace());
-        this.collection = keyVaultClient.getDatabase(namespace.getDatabaseName())
+        this.collection = getVaultCollection(keyVaultClient, options, namespace);
+    }
+
+    private static MongoCollection<BsonDocument> getVaultCollection(final MongoClient keyVaultClient,
+                                                                                final ClientEncryptionSettings options,
+                                                                                final MongoNamespace namespace) {
+        MongoCollection<BsonDocument> vaultCollection = keyVaultClient.getDatabase(namespace.getDatabaseName())
                 .getCollection(namespace.getCollectionName(), BsonDocument.class)
                 .withWriteConcern(WriteConcern.MAJORITY)
                 .withReadConcern(ReadConcern.MAJORITY);
+
+        Long timeoutMs = options.getTimeout(MILLISECONDS);
+        if (timeoutMs != null){
+            vaultCollection = vaultCollection.withTimeout(timeoutMs, MILLISECONDS);
+        }
+        return vaultCollection;
     }
 
     @Override
@@ -98,30 +120,47 @@ public Publisher<BsonBinary> createDataKey(final String kmsProvider) {
 
     @Override
     public Publisher<BsonBinary> createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions) {
-        return crypt.createDataKey(kmsProvider, dataKeyOptions)
+        return Mono.defer(() -> {
+            Timeout operationTimeout = startTimeout();
+             return createDataKey(kmsProvider, dataKeyOptions, operationTimeout);
+        });
+    }
+
+    public Mono<BsonBinary> createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions, @Nullable final Timeout operationTimeout) {
+        return crypt.createDataKey(kmsProvider, dataKeyOptions, operationTimeout)
                 .flatMap(dataKeyDocument -> {
                     MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace());
-                    return Mono.from(keyVaultClient.getDatabase(namespace.getDatabaseName())
-                                             .getCollection(namespace.getCollectionName(), BsonDocument.class)
-                                             .withWriteConcern(WriteConcern.MAJORITY)
-                                             .insertOne(dataKeyDocument))
+
+                    MongoCollection<BsonDocument> vaultCollection = keyVaultClient
+                            .getDatabase(namespace.getDatabaseName())
+                            .getCollection(namespace.getCollectionName(), BsonDocument.class)
+                            .withWriteConcern(WriteConcern.MAJORITY);
+                    return Mono.from(collectionWithTimeout(vaultCollection, operationTimeout, TIMEOUT_ERROR_MESSAGE_CREATE_DATA_KEY)
+                                    .insertOne(dataKeyDocument))
                             .map(i -> dataKeyDocument.getBinary("_id"));
                 });
     }
 
     @Override
     public Publisher<BsonBinary> encrypt(final BsonValue value, final EncryptOptions options) {
-        return crypt.encryptExplicitly(value, options);
+        notNull("value", value);
+        notNull("options", options);
+
+        return Mono.defer(() -> crypt.encryptExplicitly(value, options, startTimeout()));
     }
 
     @Override
     public Publisher<BsonDocument> encryptExpression(final Bson expression, final EncryptOptions options) {
-        return crypt.encryptExpression(expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options);
+        return Mono.defer(() -> crypt.encryptExpression(
+                expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()),
+                options,
+                startTimeout()));
     }
 
     @Override
     public Publisher<BsonValue> decrypt(final BsonBinary value) {
-        return crypt.decryptExplicitly(value);
+        notNull("value", value);
+        return Mono.defer(() -> crypt.decryptExplicitly(value, startTimeout()));
     }
 
     @Override
@@ -180,8 +219,10 @@ public Publisher<RewrapManyDataKeyResult> rewrapManyDataKey(final Bson filter) {
 
     @Override
     public Publisher<RewrapManyDataKeyResult> rewrapManyDataKey(final Bson filter, final RewrapManyDataKeyOptions options) {
-        return Mono.fromRunnable(() -> validateRewrapManyDataKeyOptions(options)).then(
-                crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options)
+        return Mono.fromRunnable(() -> validateRewrapManyDataKeyOptions(options))
+                .then(Mono.defer(()-> {
+                    Timeout operationTimeout = startTimeout();
+                    return  crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options, operationTimeout)
                         .flatMap(results -> {
                             if (results.isEmpty()) {
                                 return Mono.fromCallable(RewrapManyDataKeyResult::new);
@@ -195,8 +236,10 @@ public Publisher<RewrapManyDataKeyResult> rewrapManyDataKey(final Bson filter, f
                                                 Updates.currentDate("updateDate"))
                                 );
                             }).collect(Collectors.toList());
-                            return Mono.from(collection.bulkWrite(updateModels)).map(RewrapManyDataKeyResult::new);
-                        }));
+                            return Mono.from(collectionWithTimeout(collection, operationTimeout, TIMEOUT_ERROR_MESSAGE_REWRAP_DATA_KEY)
+                                            .bulkWrite(updateModels)).map(RewrapManyDataKeyResult::new);
+                        });
+                }));
     }
 
     @Override
@@ -222,6 +265,7 @@ public Publisher<BsonDocument> createEncryptedCollection(final MongoDatabase dat
             }
             String keyIdBsonKey = "keyId";
             return Mono.defer(() -> {
+                Timeout operationTimeout = startTimeout();
                 // `Mono.defer` results in `maybeUpdatedEncryptedFields` and `dataKeyMightBeCreated` (mutable state)
                 // being created once per `Subscriber`, which allows the produced `Mono` to support multiple `Subscribers`.
                 BsonDocument maybeUpdatedEncryptedFields = mutableDeepCopy(encryptedFields);
@@ -233,7 +277,7 @@ public Publisher<BsonDocument> createEncryptedCollection(final MongoDatabase dat
                         .filter(field -> field.containsKey(keyIdBsonKey))
                         .filter(field -> Objects.equals(field.get(keyIdBsonKey), BsonNull.VALUE))
                         // here we rely on the `createDataKey` publisher being cold, i.e., doing nothing until it is subscribed to
-                        .map(field -> Mono.fromDirect(createDataKey(kmsProvider, dataKeyOptions))
+                        .map(field -> Mono.fromDirect(createDataKey(kmsProvider, dataKeyOptions, operationTimeout))
                                 // This is the closest we can do with reactive streams to setting the `dataKeyMightBeCreated` flag
                                 // immediately before calling `createDataKey`.
                                 .doOnSubscribe(subscription -> dataKeyMightBeCreated.set(true))
@@ -255,8 +299,10 @@ public Publisher<BsonDocument> createEncryptedCollection(final MongoDatabase dat
                         //
                         // Similarly, the `Subscriber` of the returned `Publisher` is guaranteed to observe all those write actions
                         // via the `maybeUpdatedEncryptedFields` reference, which is emitted as a result of `thenReturn`.
-                        .thenEmpty(Mono.defer(() -> Mono.fromDirect(database.createCollection(collectionName,
-                                new CreateCollectionOptions(createCollectionOptions).encryptedFields(maybeUpdatedEncryptedFields))))
+                        .thenEmpty(Mono.defer(() -> Mono.fromDirect(databaseWithTimeout(database,
+                                TIMEOUT_ERROR_MESSAGE_CREATE_COLLECTION, operationTimeout)
+                                .createCollection(collectionName, new CreateCollectionOptions(createCollectionOptions)
+                                        .encryptedFields(maybeUpdatedEncryptedFields))))
                         )
                         .onErrorMap(e -> dataKeyMightBeCreated.get(), e ->
                                 new MongoUpdatedEncryptedFieldsException(maybeUpdatedEncryptedFields,
@@ -265,7 +311,9 @@ public Publisher<BsonDocument> createEncryptedCollection(final MongoDatabase dat
                         .thenReturn(maybeUpdatedEncryptedFields);
             });
         } else {
-            return Mono.fromDirect(database.createCollection(collectionName, createCollectionOptions))
+            return databaseWithTimeoutDeferred(database, startTimeout())
+                    .flatMap(wrappedDatabase -> Mono.fromDirect(wrappedDatabase
+                            .createCollection(collectionName, createCollectionOptions)))
                     .thenReturn(encryptedFields);
         }
     }
@@ -275,4 +323,9 @@ public void close() {
         keyVaultClient.close();
         crypt.close();
     }
+
+    @Nullable
+    private Timeout startTimeout() {
+        return TimeoutContext.startTimeout(options.getTimeout(MILLISECONDS));
+    }
 }
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java
index 06d5f713019..37d0236293b 100644
--- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java
@@ -19,6 +19,7 @@
 import com.mongodb.AutoEncryptionSettings;
 import com.mongodb.MongoUpdatedEncryptedFieldsException;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.CreateEncryptedCollectionParams;
 import com.mongodb.client.model.vault.DataKeyOptions;
@@ -108,7 +109,7 @@ public interface ClientEncryption extends Closeable {
      * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
      * @mongodb.driver.manual reference/operator/aggregation/match/ $match
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
     Publisher<BsonDocument> encryptExpression(Bson expression, EncryptOptions options);
 
     /**
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java
index 36b09c21add..394ca1745e3 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionTest.java
@@ -57,6 +57,7 @@ public void shouldPassAllOutcomes() {
 
     @After
     public void cleanUp() {
+        super.cleanUp();
         if (mongoClient != null) {
             mongoClient.close();
         }
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java
new file mode 100644
index 00000000000..75a19536cb7
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCommandException;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketReadTimeoutException;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.AbstractClientSideOperationsTimeoutProseTest;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.changestream.FullDocument;
+import com.mongodb.event.CommandFailedEvent;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
+import com.mongodb.reactivestreams.client.syncadapter.SyncGridFSBucket;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import org.bson.BsonDocument;
+import org.bson.BsonTimestamp;
+import org.bson.Document;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Hooks;
+import reactor.test.StepVerifier;
+
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.ClusterFixture.applyTimeoutMultiplierForServerless;
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.ClusterFixture.isServerlessTest;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.ClusterFixture.sleep;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+
+/**
+ * See https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#prose-tests
+ */
+public final class ClientSideOperationTimeoutProseTest extends AbstractClientSideOperationsTimeoutProseTest {
+    private MongoClient wrapped;
+
+    @Override
+    protected com.mongodb.client.MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) {
+        wrapped = createReactiveClient(mongoClientSettings);
+        return new SyncMongoClient(wrapped);
+    }
+
+    private static MongoClient createReactiveClient(final MongoClientSettings.Builder builder) {
+        return MongoClients.create(builder.build());
+    }
+
+    private static MongoClient createReactiveClient(final MongoClientSettings mongoClientSettings) {
+        return MongoClients.create(mongoClientSettings);
+    }
+
+    @Override
+    protected com.mongodb.client.gridfs.GridFSBucket createGridFsBucket(final com.mongodb.client.MongoDatabase mongoDatabase,
+                                                                        final String bucketName) {
+        return new SyncGridFSBucket(GridFSBuckets.create(wrapped.getDatabase(mongoDatabase.getName()), bucketName));
+    }
+
+    private GridFSBucket createReaciveGridFsBucket(final MongoDatabase mongoDatabase, final String bucketName) {
+        return GridFSBuckets.create(mongoDatabase, bucketName);
+    }
+
+    @Override
+    protected boolean isAsync() {
+        return true;
+    }
+
+    @DisplayName("6. GridFS Upload - uploads via openUploadStream can be timed out")
+    @Test
+    @Override
+    public void testGridFSUploadViaOpenUploadStreamTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        //given
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"insert\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(405))
+                + "  }"
+                + "}");
+
+        try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + applyTimeoutMultiplierForServerless(400), TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(gridFsFileNamespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createReaciveGridFsBucket(database, GRID_FS_BUCKET_NAME);
+
+
+            TestEventPublisher<ByteBuffer> eventPublisher = new TestEventPublisher<>();
+            TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>();
+
+            gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream())
+                    .subscribe(testSubscriber);
+
+            //when
+            eventPublisher.sendEvent(ByteBuffer.wrap(new byte[]{0x12}));
+            testSubscriber.requestMore(1);
+            /*
+             By prose spec definition we have to close the GridFSUploadStream when we have no more data to submit and want to flush internal buffers.
+              However, in Reactive Streams that is equivalent to propagating a complete signal from the source publisher.
+            */
+            eventPublisher.complete();
+
+            //then
+            testSubscriber.assertTerminalEvent();
+
+            List<Throwable> onErrorEvents = testSubscriber.getOnErrorEvents();
+            assertEquals(1, onErrorEvents.size());
+
+            Throwable commandError = onErrorEvents.get(0);
+            Throwable operationTimeoutErrorCause = commandError.getCause();
+            assertInstanceOf(MongoOperationTimeoutException.class, commandError);
+            assertInstanceOf(MongoSocketReadTimeoutException.class, operationTimeoutErrorCause);
+
+            CommandFailedEvent chunkInsertFailedEvent = commandListener.getCommandFailedEvent("insert");
+            assertNotNull(chunkInsertFailedEvent);
+            assertEquals(commandError, commandListener.getCommandFailedEvent("insert").getThrowable());
+        }
+    }
+
+    @DisplayName("6. GridFS Upload - Aborting an upload stream can be timed out")
+    @Test
+    @Override
+    public void testAbortingGridFsUploadStreamTimeout() throws ExecutionException, InterruptedException, TimeoutException {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        //given
+        CompletableFuture<Throwable> droppedErrorFuture = new CompletableFuture<>();
+        Hooks.onErrorDropped(droppedErrorFuture::complete);
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"delete\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(405))
+                + "  }"
+                + "}");
+
+        try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + applyTimeoutMultiplierForServerless(400), TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(gridFsFileNamespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createReaciveGridFsBucket(database, GRID_FS_BUCKET_NAME);
+
+
+            TestEventPublisher<ByteBuffer> eventPublisher = new TestEventPublisher<>();
+            TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>();
+
+            gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream())
+                    .subscribe(testSubscriber);
+
+            //when
+            eventPublisher.sendEvent(ByteBuffer.wrap(new byte[]{0x01, 0x02, 0x03, 0x04}));
+            testSubscriber.requestMore(1);
+            /*
+             By prose spec definition we have to abort the GridFSUploadStream.
+              However, in Reactive Streams that is equivalent to cancelling the subscription to propagate a cancellation signal.
+            */
+            testSubscriber.cancelSubscription();
+
+            //then
+            Throwable droppedError = droppedErrorFuture.get(TIMEOUT_DURATION.toMillis(), TimeUnit.MILLISECONDS);
+            Throwable commandError = droppedError.getCause();
+            Throwable operationTimeoutErrorCause = commandError.getCause();
+
+            assertInstanceOf(MongoOperationTimeoutException.class, commandError);
+            assertInstanceOf(MongoSocketReadTimeoutException.class, operationTimeoutErrorCause);
+
+            CommandFailedEvent deleteFailedEvent = commandListener.getCommandFailedEvent("delete");
+            assertNotNull(deleteFailedEvent);
+
+            assertEquals(commandError, commandListener.getCommandFailedEvent("delete").getThrowable());
+            // When subscription is cancelled, we should not receive any more events.
+            testSubscriber.assertNoTerminalEvent();
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @DisplayName("TimeoutMS applies to full resume attempt in a next call")
+    @Test
+    public void testTimeoutMSAppliesToFullResumeAttemptInNextCall() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isDiscoverableReplicaSet());
+        assumeFalse(isServerlessTest());
+
+        //given
+        long rtt = ClusterFixture.getPrimaryRTT();
+        try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 500, TimeUnit.MILLISECONDS))) {
+
+            MongoNamespace namespace = generateNamespace();
+            MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+
+            collectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"failCommand\","
+                    + "    mode: { times: 1},"
+                    + "    data: {"
+                    + "        failCommands: [\"getMore\" ],"
+                    + "        errorCode: 7,"
+                    + "        errorLabels: [\"ResumableChangeStreamError\" ]"
+                    + "    }"
+                    + "}");
+
+            //when
+            ChangeStreamPublisher<Document> documentChangeStreamPublisher = collection.watch(
+                            singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}")));
+
+            Assertions.assertThrows(MongoOperationTimeoutException.class,
+                    () -> Flux.from(documentChangeStreamPublisher).blockFirst(TIMEOUT_DURATION));
+            //then
+            sleep(200); //let publisher invalidate the cursor after the error.
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+
+            List<String> expectedCommandNames = Arrays.asList("aggregate", "getMore", "killCursors", "aggregate", "getMore", "killCursors");
+            assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents);
+
+            List<CommandFailedEvent> commandFailedEvents = commandListener.getCommandFailedEvents();
+            assertEquals(2, commandFailedEvents.size());
+
+            CommandFailedEvent firstGetMoreFailedEvent = commandFailedEvents.get(0);
+            assertEquals("getMore", firstGetMoreFailedEvent.getCommandName());
+            assertInstanceOf(MongoCommandException.class, firstGetMoreFailedEvent.getThrowable());
+
+            CommandFailedEvent secondGetMoreFailedEvent = commandFailedEvents.get(1);
+            assertEquals("getMore", secondGetMoreFailedEvent.getCommandName());
+            assertInstanceOf(MongoOperationTimeoutException.class, secondGetMoreFailedEvent.getThrowable());
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @DisplayName("TimeoutMS applied to initial aggregate")
+    @Test
+    public void testTimeoutMSAppliedToInitialAggregate() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isDiscoverableReplicaSet());
+        assumeFalse(isServerlessTest());
+
+        //given
+        long rtt = ClusterFixture.getPrimaryRTT();
+        try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 200, TimeUnit.MILLISECONDS))) {
+
+            MongoNamespace namespace = generateNamespace();
+            MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+            ChangeStreamPublisher<Document> documentChangeStreamPublisher = collection.watch(
+                            singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}")))
+                    .fullDocument(FullDocument.UPDATE_LOOKUP);
+
+            collectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"failCommand\","
+                    + "    mode: { times: 1},"
+                    + "    data: {"
+                    + "        failCommands: [\"aggregate\" ],"
+                    + "        blockConnection: true,"
+                    + "        blockTimeMS: " + (rtt + 201)
+                    + "    }"
+                    + "}");
+
+            //when
+            Assertions.assertThrows(MongoOperationTimeoutException.class,
+                    () -> Flux.from(documentChangeStreamPublisher).blockFirst(TIMEOUT_DURATION));
+
+            //We do not expect cursor to have been created. However, publisher closes cursor asynchronously, thus we give it some time
+            // to make sure that cursor has not been closed (which would indicate that it was created).
+            sleep(200);
+
+            //then
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertEquals(1, commandStartedEvents.size());
+            assertEquals("aggregate", commandStartedEvents.get(0).getCommandName());
+            assertOnlyOneCommandTimeoutFailure("aggregate");
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @DisplayName("TimeoutMS is refreshed for getMore if maxAwaitTimeMS is not set")
+    @Test
+    public void testTimeoutMsRefreshedForGetMoreWhenMaxAwaitTimeMsNotSet() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isDiscoverableReplicaSet());
+        assumeFalse(isServerlessTest());
+
+        //given
+        BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0);
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        sleep(2000);
+
+
+        long rtt = ClusterFixture.getPrimaryRTT();
+        try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 300, TimeUnit.MILLISECONDS))) {
+
+            MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+
+            collectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"failCommand\","
+                    + "    mode: { times: 3},"
+                    + "    data: {"
+                    + "        failCommands: [\"getMore\", \"aggregate\"],"
+                    + "        blockConnection: true,"
+                    + "        blockTimeMS: " + (rtt + 200)
+                    + "    }"
+                    + "}");
+
+            collectionHelper.insertDocuments(WriteConcern.MAJORITY,
+                    BsonDocument.parse("{x: 1}"),
+                    BsonDocument.parse("{x: 2}"),
+
+                    BsonDocument.parse("{x: 3}"),
+                    BsonDocument.parse("{x: 4}"),
+
+                    BsonDocument.parse("{x: 5}"),
+                    BsonDocument.parse("{x: 6}"));
+
+            //when
+            ChangeStreamPublisher<Document> documentChangeStreamPublisher = collection.watch()
+                    .startAtOperationTime(startTime);
+            StepVerifier.create(documentChangeStreamPublisher, 2)
+            //then
+                    .expectNextCount(2)
+                    .thenAwait(Duration.ofMillis(300))
+                    .thenRequest(2)
+                    .expectNextCount(2)
+                    .thenAwait(Duration.ofMillis(300))
+                    .thenRequest(2)
+                    .expectNextCount(2)
+                    .thenAwait(Duration.ofMillis(300))
+                    .thenRequest(2)
+                    .expectError(MongoOperationTimeoutException.class)
+                    .verify();
+
+            sleep(500); //let publisher invalidate the cursor after the error.
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            List<String> expectedCommandNames = Arrays.asList("aggregate", "getMore", "getMore", "getMore", "killCursors");
+            assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents);
+            assertOnlyOneCommandTimeoutFailure("getMore");
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @DisplayName("TimeoutMS is refreshed for getMore if maxAwaitTimeMS is set")
+    @Test
+    public void testTimeoutMsRefreshedForGetMoreWhenMaxAwaitTimeMsSet() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isDiscoverableReplicaSet());
+        assumeFalse(isServerlessTest());
+
+        //given
+        BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0);
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        sleep(2000);
+
+        long rtt = ClusterFixture.getPrimaryRTT();
+        try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 300, TimeUnit.MILLISECONDS))) {
+
+            MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName())
+                    .withReadPreference(ReadPreference.primary());
+
+            collectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"failCommand\","
+                    + "    mode: { times: 2},"
+                    + "    data: {"
+                    + "        failCommands: [\"aggregate\", \"getMore\"],"
+                    + "        blockConnection: true,"
+                    + "        blockTimeMS: " + (rtt + 200)
+                    + "    }"
+                    + "}");
+
+
+            collectionHelper.insertDocuments(WriteConcern.MAJORITY,
+                    BsonDocument.parse("{x: 1}"),
+                    BsonDocument.parse("{x: 2}"),
+
+                    BsonDocument.parse("{x: 3}"),
+                    BsonDocument.parse("{x: 4}"));
+
+            //when
+            ChangeStreamPublisher<Document> documentChangeStreamPublisher = collection.watch()
+                    .maxAwaitTime(1, TimeUnit.MILLISECONDS)
+                    .startAtOperationTime(startTime);
+            StepVerifier.create(documentChangeStreamPublisher, 2)
+            //then
+                    .expectNextCount(2)
+                    .thenAwait(Duration.ofMillis(600))
+                    .thenRequest(2)
+                    .expectNextCount(2)
+                    .thenCancel()
+                    .verify();
+
+            sleep(500); //let publisher invalidate the cursor after the error.
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            List<String> expectedCommandNames = Arrays.asList("aggregate", "getMore", "killCursors");
+            assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents);
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @DisplayName("TimeoutMS is honored for next operation when several getMore executed internally")
+    @Test
+    public void testTimeoutMsIsHonoredForNextOperationWhenSeveralGetMoreExecutedInternally() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isDiscoverableReplicaSet());
+        assumeFalse(isServerlessTest());
+
+        //given
+        long rtt = ClusterFixture.getPrimaryRTT();
+        try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 2500, TimeUnit.MILLISECONDS))) {
+
+            MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+
+            //when
+            ChangeStreamPublisher<Document> documentChangeStreamPublisher = collection.watch();
+            StepVerifier.create(documentChangeStreamPublisher, 2)
+            //then
+                    .expectError(MongoOperationTimeoutException.class)
+                    .verify();
+
+            sleep(200); //let publisher invalidate the cursor after the error.
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertCommandStartedEventsInOder(Arrays.asList("aggregate", "getMore", "getMore", "getMore", "killCursors"),
+                    commandStartedEvents);
+            assertOnlyOneCommandTimeoutFailure("getMore");
+        }
+    }
+
+    private static void assertCommandStartedEventsInOder(final List<String> expectedCommandNames,
+                                                         final List<CommandStartedEvent> commandStartedEvents) {
+        assertEquals(expectedCommandNames.size(), commandStartedEvents.size(), "Expected: " + expectedCommandNames + ". Actual: "
+                + commandStartedEvents.stream()
+                        .map(CommandStartedEvent::getCommand)
+                        .map(BsonDocument::toJson)
+                        .collect(Collectors.toList()));
+
+        for (int i = 0; i < expectedCommandNames.size(); i++) {
+            CommandStartedEvent commandStartedEvent = commandStartedEvents.get(i);
+
+            assertEquals(expectedCommandNames.get(i), commandStartedEvent.getCommandName());
+        }
+    }
+
+    private void assertOnlyOneCommandTimeoutFailure(final String command) {
+        List<CommandFailedEvent> commandFailedEvents = commandListener.getCommandFailedEvents();
+        assertEquals(1, commandFailedEvents.size());
+
+        CommandFailedEvent failedAggregateCommandEvent = commandFailedEvents.get(0);
+        assertEquals(command, commandFailedEvents.get(0).getCommandName());
+        assertInstanceOf(MongoOperationTimeoutException.class, failedAggregateCommandEvent.getThrowable());
+    }
+
+    @Override
+    @BeforeEach
+    public void setUp() {
+        super.setUp();
+        SyncMongoClient.enableSleepAfterSessionClose(postSessionCloseSleep());
+    }
+
+    @Override
+    @AfterEach
+    public void tearDown() {
+        super.tearDown();
+        SyncMongoClient.disableSleep();
+    }
+
+    @Override
+    protected int postSessionCloseSleep() {
+        return 256;
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java
index e3ff5921ad2..2040e295d9a 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java
@@ -17,7 +17,6 @@
 package com.mongodb.reactivestreams.client;
 
 import com.mongodb.ReadConcern;
-import com.mongodb.event.CommandEvent;
 import com.mongodb.event.CommandStartedEvent;
 import com.mongodb.internal.connection.TestCommandListener;
 import org.bson.BsonDocument;
@@ -62,7 +61,7 @@ public void shouldIncludeReadConcernInCommand() throws InterruptedException {
                 .find())
                 .block(TIMEOUT_DURATION);
 
-        List<CommandEvent> events = commandListener.getCommandStartedEvents();
+        List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
 
         BsonDocument commandDocument = new BsonDocument("find", new BsonString("test"))
                 .append("readConcern", ReadConcern.LOCAL.asDocument())
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java
new file mode 100644
index 00000000000..b8a40529dcd
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Sinks;
+
+public class TestEventPublisher<T> {
+    private final Sinks.Many<T> sink;
+
+    public TestEventPublisher() {
+        this.sink = Sinks.many().unicast().onBackpressureBuffer();
+    }
+
+    // Method to send events
+    public void sendEvent(final T event) {
+        sink.tryEmitNext(event);
+    }
+
+    public Flux<T> getEventStream() {
+        return sink.asFlux();
+    }
+
+    public long currentSubscriberCount() {
+        return sink.currentSubscriberCount();
+    }
+
+    public void complete() {
+        sink.tryEmitComplete();
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java
index f6269c737ec..05411729ba7 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java
@@ -135,6 +135,10 @@ public List<T> getOnNextEvents() {
         return onNextEvents;
     }
 
+    public void cancelSubscription() {
+        subscription.cancel();
+    }
+
     /**
      * Assert that a particular sequence of items was received by this {@link Subscriber} in order.
      *
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java
new file mode 100644
index 00000000000..5df9c571dbe
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.csot;
+
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.csot.AbstractClientSideOperationsEncryptionTimeoutProseTest;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+
+public class ClientSideOperationsEncryptionTimeoutProseTest extends AbstractClientSideOperationsEncryptionTimeoutProseTest {
+    public ClientEncryption createClientEncryption(final ClientEncryptionSettings.Builder builder) {
+        return new SyncClientEncryption(ClientEncryptions.create(builder.build()));
+    }
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings.Builder builder) {
+        return new SyncMongoClient(com.mongodb.reactivestreams.client.MongoClients.create(builder.build()));
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java
index 410dfd02fc4..ebbd2069f70 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java
@@ -373,7 +373,7 @@ public void testBatchCursorReportsCursorErrors() {
 
         BsonDocument getMoreCommand = commandListener.getCommandStartedEvents().stream()
                 .filter(e -> e.getCommandName().equals("getMore"))
-                .map(e -> ((CommandStartedEvent) e).getCommand())
+                .map(CommandStartedEvent::getCommand)
                 .findFirst()
                 .get();
 
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java
index 8e7b1af1bc9..102b96e424f 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java
@@ -18,8 +18,10 @@
 
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.async.AsyncBatchCursor;
 import com.mongodb.internal.async.SingleResultCallback;
+import com.mongodb.internal.operation.AsyncOperations;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import org.bson.Document;
 import org.junit.jupiter.api.Test;
@@ -36,6 +38,7 @@
 import java.util.Queue;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
 import java.util.stream.IntStream;
 
 import static com.mongodb.reactivestreams.client.internal.TestHelper.OPERATION_EXECUTOR;
@@ -169,6 +172,11 @@ BatchCursorPublisher<Document> createVerifiableBatchCursor(final List<Document>
             AsyncReadOperation<AsyncBatchCursor<Document>> asAsyncReadOperation(final int initialBatchSize) {
                 return readOperation;
             }
+
+            @Override
+            Function<AsyncOperations<?>, TimeoutSettings> getTimeoutSettings() {
+                return (AsyncOperations::getTimeoutSettings);
+            }
         };
 
         OperationExecutor executor = OPERATION_EXECUTOR;
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java
index 21c0921225a..6b81b1f42af 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java
@@ -17,6 +17,7 @@
 
 import com.mongodb.ExplainVerbosity;
 import com.mongodb.client.AggregateIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.AggregatePublisher;
@@ -111,6 +112,12 @@ public AggregateIterable<T> let(final Bson variables) {
         return this;
     }
 
+    @Override
+    public AggregateIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public Document explain() {
         return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION));
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java
index 36aff9506ed..494e5f8c74e 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java
@@ -21,6 +21,7 @@
 import com.mongodb.TransactionOptions;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.TransactionBody;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.lang.Nullable;
 import com.mongodb.session.ServerSession;
 import org.bson.BsonDocument;
@@ -182,6 +183,11 @@ public <T> T withTransaction(final TransactionBody<T> transactionBody, final Tra
         throw new UnsupportedOperationException();
     }
 
+    @Override
+    public TimeoutContext getTimeoutContext() {
+        return wrapped.getTimeoutContext();
+    }
+
     private static void sleep(final long millis) {
         try {
             Thread.sleep(millis);
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java
index 1f4594270f9..7f50727621d 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java
@@ -17,6 +17,7 @@
 package com.mongodb.reactivestreams.client.syncadapter;
 
 import com.mongodb.client.DistinctIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.DistinctPublisher;
@@ -69,4 +70,10 @@ public DistinctIterable<T> comment(@Nullable final BsonValue comment) {
         wrapped.comment(comment);
         return this;
     }
+
+    @Override
+    public DistinctIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
 }
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java
index 0cc68b0042e..3cf93b9ffb0 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java
@@ -19,6 +19,7 @@
 import com.mongodb.CursorType;
 import com.mongodb.ExplainVerbosity;
 import com.mongodb.client.FindIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.FindPublisher;
@@ -174,6 +175,12 @@ public FindIterable<T> allowDiskUse(@Nullable final java.lang.Boolean allowDiskU
         return this;
     }
 
+    @Override
+    public FindIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public Document explain() {
         return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION));
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java
index a09b4ffbec3..48b28e5540a 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java
@@ -42,6 +42,7 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
 import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
@@ -79,6 +80,11 @@ public ReadConcern getReadConcern() {
         return wrapped.getReadConcern();
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return wrapped.getTimeout(timeUnit);
+    }
+
     @Override
     public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) {
         return new SyncGridFSBucket(wrapped.withChunkSizeBytes(chunkSizeBytes));
@@ -99,6 +105,11 @@ public GridFSBucket withReadConcern(final ReadConcern readConcern) {
         return new SyncGridFSBucket(wrapped.withReadConcern(readConcern));
     }
 
+    @Override
+    public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new SyncGridFSBucket(wrapped.withTimeout(timeout, timeUnit));
+    }
+
     @Override
     public GridFSUploadStream openUploadStream(final String filename) {
         return openUploadStream(filename, new GridFSUploadOptions());
@@ -197,7 +208,7 @@ public GridFSDownloadStream openDownloadStream(final ObjectId id) {
 
     @Override
     public GridFSDownloadStream openDownloadStream(final BsonValue id) {
-        throw new UnsupportedOperationException();
+        return new SyncGridFSDownloadStream(wrapped.downloadToPublisher(id));
     }
 
     @Override
@@ -279,17 +290,17 @@ public GridFSFindIterable find() {
 
     @Override
     public GridFSFindIterable find(final Bson filter) {
-        throw new UnsupportedOperationException();
+        return new SyncGridFSFindIterable(wrapped.find(filter));
     }
 
     @Override
     public GridFSFindIterable find(final ClientSession clientSession) {
-        throw new UnsupportedOperationException();
+        return new SyncGridFSFindIterable(wrapped.find(unwrap(clientSession)));
     }
 
     @Override
     public GridFSFindIterable find(final ClientSession clientSession, final Bson filter) {
-        throw new UnsupportedOperationException();
+        return new SyncGridFSFindIterable(wrapped.find(unwrap(clientSession), filter));
     }
 
     @Override
@@ -334,12 +345,16 @@ public void rename(final ClientSession clientSession, final BsonValue id, final
 
     @Override
     public void drop() {
-        Mono.from(wrapped.drop()).contextWrite(CONTEXT).block(TIMEOUT_DURATION);
+        Mono.from(wrapped.drop())
+                .contextWrite(CONTEXT)
+                .block(TIMEOUT_DURATION);
     }
 
     @Override
     public void drop(final ClientSession clientSession) {
-        Mono.from(wrapped.drop(unwrap(clientSession))).contextWrite(CONTEXT).block(TIMEOUT_DURATION);
+        Mono.from(wrapped.drop(unwrap(clientSession)))
+                .contextWrite(CONTEXT)
+                .block(TIMEOUT_DURATION);
     }
 
     private void toOutputStream(final GridFSDownloadPublisher downloadPublisher, final OutputStream destination) {
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java
new file mode 100644
index 00000000000..b3217b8f47d
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.syncadapter;
+
+import com.mongodb.MongoGridFSException;
+import com.mongodb.client.gridfs.GridFSDownloadStream;
+import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
+import static java.util.Objects.requireNonNull;
+
+public class SyncGridFSDownloadStream extends GridFSDownloadStream {
+    private final AtomicBoolean closed = new AtomicBoolean(false);
+    private ByteBuffer byteBuffer;
+    private final GridFSDownloadPublisher wrapped;
+
+    public SyncGridFSDownloadStream(final GridFSDownloadPublisher publisher) {
+       this.wrapped = publisher;
+       this.byteBuffer = ByteBuffer.allocate(0);
+    }
+
+    @Override
+    public GridFSFile getGridFSFile() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public GridFSDownloadStream batchSize(final int batchSize) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int read() {
+        checkClosed();
+        readAll();
+        // honor the InputStream contract: unsigned byte value (0-255), or -1 at end of stream
+        return byteBuffer.hasRemaining() ? (byteBuffer.get() & 0xFF) : -1;
+    }
+
+    @Override
+    public int read(final byte[] b) {
+        checkClosed();
+        readAll();
+        int length = Math.min(byteBuffer.remaining(), b.length);
+        byteBuffer.get(b, 0, length); // avoid BufferUnderflowException when fewer bytes remain than b.length
+        return length == 0 ? -1 : length;
+    }
+
+    @Override
+    public int read(final byte[] b, final int off, final int len) {
+        checkClosed();
+        readAll();
+        int length = Math.min(byteBuffer.remaining(), len);
+        byteBuffer.get(b, off, length); // avoid BufferUnderflowException when fewer bytes remain than len
+        return length == 0 ? -1 : length;
+    }
+
+    @Override
+    public long skip(final long n) {
+        checkClosed();
+        readAll();
+        int position = byteBuffer.position();
+        long min = Math.min(position, n);
+        byteBuffer.position((int) min);
+        return min;
+    }
+
+    @Override
+    public int available() {
+        checkClosed();
+        readAll();
+        return byteBuffer.remaining();
+    }
+
+    @Override
+    public void mark() {
+        checkClosed();
+        readAll();
+        byteBuffer.mark();
+    }
+
+    @Override
+    public void reset() {
+        checkClosed();
+        readAll();
+        byteBuffer.reset();
+    }
+
+    @Override
+    public void close() {
+        closed.set(true);
+    }
+
+    private void readAll() {
+        List<ByteBuffer> byteBuffers = requireNonNull(Flux
+                .from(wrapped).contextWrite(CONTEXT).collectList().block(TIMEOUT_DURATION));
+        // Concatenate the chunks into a single freshly-allocated buffer: the first chunk may not
+        // have capacity for the rest, so reducing via put() would overflow. flip() readies it for reads.
+        byteBuffer = ByteBuffer.allocate(byteBuffers.stream().mapToInt(ByteBuffer::remaining).sum());
+        byteBuffers.forEach(byteBuffer::put);
+        byteBuffer.flip();
+    }
+
+    private void checkClosed() {
+        if (closed.get()) {
+            throw new MongoGridFSException("The DownloadStream has been closed");
+        }
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java
new file mode 100644
index 00000000000..1021e6bc102
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.syncadapter;
+
+import com.mongodb.client.gridfs.GridFSFindIterable;
+import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.client.model.Collation;
+import com.mongodb.lang.Nullable;
+import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher;
+import org.bson.conversions.Bson;
+
+import java.util.concurrent.TimeUnit;
+
+class SyncGridFSFindIterable extends SyncMongoIterable<GridFSFile> implements GridFSFindIterable {
+    private final GridFSFindPublisher wrapped;
+
+    SyncGridFSFindIterable(final GridFSFindPublisher wrapped) {
+        super(wrapped);
+        this.wrapped = wrapped;
+    }
+
+    @Override
+    public GridFSFindIterable filter(@Nullable final Bson filter) {
+        wrapped.filter(filter);
+        return this;
+    }
+
+    @Override
+    public GridFSFindIterable limit(final int limit) {
+        wrapped.limit(limit);
+        return this;
+    }
+
+    @Override
+    public GridFSFindIterable skip(final int skip) {
+        wrapped.skip(skip);
+        return this;
+    }
+
+    @Override
+    public GridFSFindIterable maxTime(final long maxTime, final TimeUnit timeUnit) {
+        wrapped.maxTime(maxTime, timeUnit);
+        return this;
+    }
+
+    @Override
+    public GridFSFindIterable sort(@Nullable final Bson sort) {
+        wrapped.sort(sort);
+        return this;
+    }
+
+    @Override
+    public GridFSFindIterable noCursorTimeout(final boolean noCursorTimeout) {
+        wrapped.noCursorTimeout(noCursorTimeout);
+        return this;
+    }
+
+    @Override
+    public GridFSFindIterable batchSize(final int batchSize) {
+        wrapped.batchSize(batchSize);
+        super.batchSize(batchSize);
+        return this;
+    }
+
+    @Override
+    public GridFSFindIterable collation(@Nullable final Collation collation) {
+        wrapped.collation(collation);
+        return this;
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java
index 5dfa3fe76d6..48d88963077 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java
@@ -17,6 +17,7 @@
 package com.mongodb.reactivestreams.client.syncadapter;
 
 import com.mongodb.client.ListCollectionsIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ListCollectionsPublisher;
 import org.bson.BsonValue;
@@ -62,4 +63,10 @@ public ListCollectionsIterable<T> comment(final BsonValue comment) {
         wrapped.comment(comment);
         return this;
     }
+
+    @Override
+    public ListCollectionsIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
 }
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java
index 53f901e538b..4248e59c361 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java
@@ -17,6 +17,7 @@
 package com.mongodb.reactivestreams.client.syncadapter;
 
 import com.mongodb.client.ListDatabasesIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import com.mongodb.reactivestreams.client.ListDatabasesPublisher;
 import org.bson.BsonValue;
@@ -74,4 +75,10 @@ public ListDatabasesIterable<T> comment(final BsonValue comment) {
         wrapped.comment(comment);
         return this;
     }
+
+    @Override
+    public ListDatabasesIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
 }
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java
index 3cec57e3ce0..947cb8f0d0f 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java
@@ -17,6 +17,7 @@
 package com.mongodb.reactivestreams.client.syncadapter;
 
 import com.mongodb.client.ListIndexesIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.reactivestreams.client.ListIndexesPublisher;
 import org.bson.BsonValue;
 
@@ -54,4 +55,10 @@ public ListIndexesIterable<T> comment(final BsonValue comment) {
         wrapped.comment(comment);
         return this;
     }
+
+    @Override
+    public ListIndexesIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
 }
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java
index 7efbde8d9fa..f119c645916 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.ExplainVerbosity;
 import com.mongodb.client.ListSearchIndexesIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher;
 import org.bson.BsonValue;
@@ -80,6 +81,12 @@ public ListSearchIndexesIterable<T> comment(final BsonValue comment) {
         return this;
     }
 
+    @Override
+    public ListSearchIndexesIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public Document explain() {
         return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION));
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java
index 66a287cfa64..efc70b690fa 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java
@@ -16,6 +16,7 @@
 
 package com.mongodb.reactivestreams.client.syncadapter;
 
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.conversions.Bson;
@@ -106,6 +107,7 @@ public com.mongodb.client.MapReduceIterable<T> databaseName(@Nullable final Stri
         return this;
     }
 
+
     @Override
     public com.mongodb.client.MapReduceIterable<T> batchSize(final int batchSize) {
         wrapped.batchSize(batchSize);
@@ -124,4 +126,10 @@ public com.mongodb.client.MapReduceIterable<T> collation(@Nullable final Collati
         wrapped.collation(collation);
         return this;
     }
+
+    @Override
+    public com.mongodb.client.MapReduceIterable<T> timeoutMode(final TimeoutMode timeoutMode) {
+        wrapped.timeoutMode(timeoutMode);
+        return this;
+    }
 }
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java
index 28d5adbdfc7..ceb5ea72769 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java
@@ -17,31 +17,27 @@
 package com.mongodb.reactivestreams.client.syncadapter;
 
 import com.mongodb.ClientSessionOptions;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
 import com.mongodb.client.ChangeStreamIterable;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.ListDatabasesIterable;
 import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCluster;
 import com.mongodb.client.MongoDatabase;
 import com.mongodb.client.MongoIterable;
 import com.mongodb.connection.ClusterDescription;
 import com.mongodb.reactivestreams.client.internal.BatchCursor;
-import org.bson.BsonDocument;
 import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
-import reactor.core.publisher.Mono;
 
 import java.util.List;
-
-import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
-import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
-import static java.util.Objects.requireNonNull;
+import java.util.concurrent.TimeUnit;
 
 public class SyncMongoClient implements MongoClient {
 
-    private static long sleepAfterCursorOpenMS;
-
-    private static long sleepAfterCursorCloseMS;
-    private static long sleepAfterSessionCloseMS;
     private static boolean waitForBatchCursorCreation;
 
     /**
@@ -50,13 +46,17 @@ public class SyncMongoClient implements MongoClient {
      * can set this to a positive value.  A value of 256 ms has been shown to work well. The default value is 0.
      */
     public static void enableSleepAfterCursorOpen(final long sleepMS) {
-        if (sleepAfterCursorOpenMS != 0) {
-            throw new IllegalStateException("Already enabled");
-        }
-        if (sleepMS <= 0) {
-            throw new IllegalArgumentException("sleepMS must be a positive value");
-        }
-        sleepAfterCursorOpenMS = sleepMS;
+        SyncMongoCluster.enableSleepAfterCursorOpen(sleepMS);
+    }
+
+    /**
+     * Unfortunately this is the only way to wait for error logic to complete, since it's asynchronous.
+     * This is inherently racy but there are not any other good options. Tests which require cursor error handling to complete before
+     * execution of the next operation can set this to a positive value.  A value of 256 ms has been shown to work well. The default
+     * value is 0.
+     */
+    public static void enableSleepAfterCursorError(final long sleepMS) {
+        SyncMongoCluster.enableSleepAfterCursorError(sleepMS);
     }
 
     /**
@@ -66,13 +66,7 @@ public static void enableSleepAfterCursorOpen(final long sleepMS) {
      * value is 0.
      */
     public static void enableSleepAfterCursorClose(final long sleepMS) {
-        if (sleepAfterCursorCloseMS != 0) {
-            throw new IllegalStateException("Already enabled");
-        }
-        if (sleepMS <= 0) {
-            throw new IllegalArgumentException("sleepMS must be a positive value");
-        }
-        sleepAfterCursorCloseMS = sleepMS;
+        SyncMongoCluster.enableSleepAfterCursorClose(sleepMS);
     }
 
     /**
@@ -81,13 +75,7 @@ public static void enableSleepAfterCursorClose(final long sleepMS) {
      * the attempt is racy and incorrect, but good enough for tests given that no other approach is available.
      */
     public static void enableSleepAfterSessionClose(final long sleepMS) {
-        if (sleepAfterSessionCloseMS != 0) {
-            throw new IllegalStateException("Already enabled");
-        }
-        if (sleepMS <= 0) {
-            throw new IllegalArgumentException("sleepMS must be a positive value");
-        }
-        sleepAfterSessionCloseMS = sleepMS;
+        SyncMongoCluster.enableSleepAfterSessionClose(sleepMS);
     }
 
     /**
@@ -112,27 +100,31 @@ public static void disableWaitForBatchCursorCreation() {
     }
 
     public static void disableSleep() {
-        sleepAfterCursorOpenMS = 0;
-        sleepAfterCursorCloseMS = 0;
-        sleepAfterSessionCloseMS = 0;
+        SyncMongoCluster.disableSleep();
     }
 
     public static long getSleepAfterCursorOpen() {
-        return sleepAfterCursorOpenMS;
+        return SyncMongoCluster.getSleepAfterCursorOpen();
+    }
+
+    public static long getSleepAfterCursorError() {
+        return SyncMongoCluster.getSleepAfterCursorError();
     }
 
     public static long getSleepAfterCursorClose() {
-        return sleepAfterCursorCloseMS;
+        return SyncMongoCluster.getSleepAfterCursorClose();
     }
 
     public static long getSleepAfterSessionClose() {
-        return sleepAfterSessionCloseMS;
+        return SyncMongoCluster.getSleepAfterSessionClose();
     }
 
     private final com.mongodb.reactivestreams.client.MongoClient wrapped;
+    private final SyncMongoCluster delegate;
 
     public SyncMongoClient(final com.mongodb.reactivestreams.client.MongoClient wrapped) {
         this.wrapped = wrapped;
+        this.delegate = new SyncMongoCluster(wrapped);
     }
 
     public com.mongodb.reactivestreams.client.MongoClient getWrapped() {
@@ -140,102 +132,151 @@ public com.mongodb.reactivestreams.client.MongoClient getWrapped() {
     }
 
     @Override
-    public MongoDatabase getDatabase(final String databaseName) {
-        return new SyncMongoDatabase(wrapped.getDatabase(databaseName));
+    public CodecRegistry getCodecRegistry() {
+        return delegate.getCodecRegistry();
     }
 
     @Override
-    public ClientSession startSession() {
-        return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this);
+    public ReadPreference getReadPreference() {
+        return delegate.getReadPreference();
     }
 
     @Override
-    public ClientSession startSession(final ClientSessionOptions options) {
-        return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession(options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this);
+    public WriteConcern getWriteConcern() {
+        return delegate.getWriteConcern();
     }
 
     @Override
-    public void close() {
-        wrapped.close();
+    public ReadConcern getReadConcern() {
+        return delegate.getReadConcern();
+    }
+
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return delegate.getTimeout(timeUnit);
+    }
+
+    @Override
+    public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) {
+        return delegate.withCodecRegistry(codecRegistry);
+    }
+
+    @Override
+    public MongoCluster withReadPreference(final ReadPreference readPreference) {
+        return delegate.withReadPreference(readPreference);
+    }
+
+    @Override
+    public MongoCluster withWriteConcern(final WriteConcern writeConcern) {
+        return delegate.withWriteConcern(writeConcern);
+    }
+
+    @Override
+    public MongoCluster withReadConcern(final ReadConcern readConcern) {
+        return delegate.withReadConcern(readConcern);
+    }
+
+    @Override
+    public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return delegate.withTimeout(timeout, timeUnit);
+    }
+
+    @Override
+    public MongoDatabase getDatabase(final String databaseName) {
+        return delegate.getDatabase(databaseName);
+    }
+
+    @Override
+    public ClientSession startSession() {
+        return delegate.startSession();
+    }
+
+    @Override
+    public ClientSession startSession(final ClientSessionOptions options) {
+        return delegate.startSession(options);
     }
 
     @Override
     public MongoIterable<String> listDatabaseNames() {
-        return listDatabases(BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue());
+        return delegate.listDatabaseNames();
     }
 
     @Override
     public MongoIterable<String> listDatabaseNames(final ClientSession clientSession) {
-        return listDatabases(clientSession, BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue());
+        return delegate.listDatabaseNames(clientSession);
     }
 
+
     @Override
     public ListDatabasesIterable<Document> listDatabases() {
-        return new SyncListDatabasesIterable<>(wrapped.listDatabases());
+        return delegate.listDatabases();
     }
 
     @Override
     public ListDatabasesIterable<Document> listDatabases(final ClientSession clientSession) {
-        return listDatabases(clientSession, Document.class);
+        return delegate.listDatabases(clientSession);
     }
 
     @Override
     public <TResult> ListDatabasesIterable<TResult> listDatabases(final Class<TResult> resultClass) {
-        return new SyncListDatabasesIterable<>(wrapped.listDatabases(resultClass));
+        return delegate.listDatabases(resultClass);
     }
 
     @Override
     public <TResult> ListDatabasesIterable<TResult> listDatabases(final ClientSession clientSession, final Class<TResult> resultClass) {
-        return new SyncListDatabasesIterable<>(wrapped.listDatabases(unwrap(clientSession), resultClass));
+        return delegate.listDatabases(clientSession, resultClass);
     }
 
     @Override
     public ChangeStreamIterable<Document> watch() {
-        return new SyncChangeStreamIterable<>(wrapped.watch());
+        return delegate.watch();
     }
 
     @Override
     public <TResult> ChangeStreamIterable<TResult> watch(final Class<TResult> resultClass) {
-        return new SyncChangeStreamIterable<>(wrapped.watch(resultClass));
+        return delegate.watch(resultClass);
     }
 
     @Override
     public ChangeStreamIterable<Document> watch(final List<? extends Bson> pipeline) {
-        return new SyncChangeStreamIterable<>(wrapped.watch(pipeline));
+        return delegate.watch(pipeline);
     }
 
     @Override
     public <TResult> ChangeStreamIterable<TResult> watch(final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
-        return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, resultClass));
+        return delegate.watch(pipeline, resultClass);
     }
 
     @Override
     public ChangeStreamIterable<Document> watch(final ClientSession clientSession) {
-        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession)));
+        return delegate.watch(clientSession);
     }
 
     @Override
     public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final Class<TResult> resultClass) {
-        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), resultClass));
+        return delegate.watch(clientSession, resultClass);
     }
 
     @Override
     public ChangeStreamIterable<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
-        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline));
+        return delegate.watch(clientSession, pipeline);
     }
 
     @Override
-    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final List<? extends Bson> pipeline,
-                                                         final Class<TResult> resultClass) {
-        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass));
+    public <TResult> ChangeStreamIterable<TResult> watch(
+            final ClientSession clientSession, final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return delegate.watch(clientSession, pipeline, resultClass);
     }
 
+    @Override
+    public void close() {
+        wrapped.close();
+    }
+
+
     @Override
     public ClusterDescription getClusterDescription() {
         return wrapped.getClusterDescription();
     }
 
-    private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) {
-        return ((SyncClientSession) clientSession).getWrapped();
-    }
 }
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java
new file mode 100644
index 00000000000..780f7260eb4
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.syncadapter;
+
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.ChangeStreamIterable;
+import com.mongodb.client.ClientSession;
+import com.mongodb.client.ListDatabasesIterable;
+import com.mongodb.client.MongoCluster;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.MongoIterable;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+import reactor.core.publisher.Mono;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
+import static java.util.Objects.requireNonNull;
+
+public class SyncMongoCluster implements MongoCluster {
+
+    private static long sleepAfterCursorOpenMS;
+    private static long sleepAfterCursorErrorMS;
+    private static long sleepAfterCursorCloseMS;
+    private static long sleepAfterSessionCloseMS;
+
+    /**
+     * Unfortunately this is the only way to wait for a query to be initiated, since Reactive Streams is asynchronous
+     * and we have no way of knowing. Tests which require cursor initiation to complete before execution of the next operation
+     * can set this to a positive value.  A value of 256 ms has been shown to work well. The default value is 0.
+     */
+    public static void enableSleepAfterCursorOpen(final long sleepMS) {
+        if (sleepAfterCursorOpenMS != 0) {
+            throw new IllegalStateException("Already enabled");
+        }
+        if (sleepMS <= 0) {
+            throw new IllegalArgumentException("sleepMS must be a positive value");
+        }
+        sleepAfterCursorOpenMS = sleepMS;
+    }
+
+    /**
+     * Unfortunately this is the only way to wait for error logic to complete, since it's asynchronous.
+     * This is inherently racy but there are not any other good options. Tests which require cursor error handling to complete before
+     * execution of the next operation can set this to a positive value.  A value of 256 ms has been shown to work well. The default
+     * value is 0.
+     */
+    public static void enableSleepAfterCursorError(final long sleepMS) {
+        if (sleepAfterCursorErrorMS != 0) {
+            throw new IllegalStateException("Already enabled");
+        }
+        if (sleepMS <= 0) {
+            throw new IllegalArgumentException("sleepMS must be a positive value");
+        }
+        sleepAfterCursorErrorMS = sleepMS;
+    }
+
+    /**
+     * Unfortunately this is the only way to wait for close to complete, since it's asynchronous.
+     * This is inherently racy but there are not any other good options. Tests which require cursor cancellation to complete before
+     * execution of the next operation can set this to a positive value.  A value of 256 ms has been shown to work well. The default
+     * value is 0.
+     */
+    public static void enableSleepAfterCursorClose(final long sleepMS) {
+        if (sleepAfterCursorCloseMS != 0) {
+            throw new IllegalStateException("Already enabled");
+        }
+        if (sleepMS <= 0) {
+            throw new IllegalArgumentException("sleepMS must be a positive value");
+        }
+        sleepAfterCursorCloseMS = sleepMS;
+    }
+
+
+    /**
+     * Enables {@linkplain Thread#sleep(long) sleeping} in {@link SyncClientSession#close()} to wait until asynchronous closing actions
+     * are done. It is an attempt to make asynchronous {@link SyncMongoClient#close()} method synchronous;
+     * the attempt is racy and incorrect, but good enough for tests given that no other approach is available.
+     */
+    public static void enableSleepAfterSessionClose(final long sleepMS) {
+        if (sleepAfterSessionCloseMS != 0) {
+            throw new IllegalStateException("Already enabled");
+        }
+        if (sleepMS <= 0) {
+            throw new IllegalArgumentException("sleepMS must be a positive value");
+        }
+        sleepAfterSessionCloseMS = sleepMS;
+    }
+
+    public static void disableSleep() {
+        sleepAfterCursorOpenMS = 0;
+        sleepAfterCursorErrorMS = 0;
+        sleepAfterCursorCloseMS = 0;
+        sleepAfterSessionCloseMS = 0;
+    }
+
+    public static long getSleepAfterCursorOpen() {
+        return sleepAfterCursorOpenMS;
+    }
+
+    public static long getSleepAfterCursorError() {
+        return sleepAfterCursorErrorMS;
+    }
+
+    public static long getSleepAfterCursorClose() {
+        return sleepAfterCursorCloseMS;
+    }
+
+    public static long getSleepAfterSessionClose() {
+        return sleepAfterSessionCloseMS;
+    }
+
+    private final com.mongodb.reactivestreams.client.MongoCluster wrapped;
+
+    public SyncMongoCluster(final com.mongodb.reactivestreams.client.MongoCluster wrapped) {
+        this.wrapped = wrapped;
+    }
+
+    public com.mongodb.reactivestreams.client.MongoCluster getWrapped() {
+        return wrapped;
+    }
+
+    @Override
+    public CodecRegistry getCodecRegistry() {
+        return wrapped.getCodecRegistry();
+    }
+
+    @Override
+    public ReadPreference getReadPreference() {
+        return wrapped.getReadPreference();
+    }
+
+    @Override
+    public WriteConcern getWriteConcern() {
+        return wrapped.getWriteConcern();
+    }
+
+    @Override
+    public ReadConcern getReadConcern() {
+        return wrapped.getReadConcern();
+    }
+
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return wrapped.getTimeout(timeUnit);
+    }
+
+    @Override
+    public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) {
+        return new SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry));
+    }
+
+    @Override
+    public MongoCluster withReadPreference(final ReadPreference readPreference) {
+        return new SyncMongoCluster(wrapped.withReadPreference(readPreference));
+    }
+
+    @Override
+    public MongoCluster withWriteConcern(final WriteConcern writeConcern) {
+        return new SyncMongoCluster(wrapped.withWriteConcern(writeConcern));
+    }
+
+    @Override
+    public MongoCluster withReadConcern(final ReadConcern readConcern) {
+        return new SyncMongoCluster(wrapped.withReadConcern(readConcern));
+    }
+
+    @Override
+    public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit));
+    }
+
+    @Override
+    public MongoDatabase getDatabase(final String databaseName) {
+        return new SyncMongoDatabase(wrapped.getDatabase(databaseName));
+    }
+
+    @Override
+    public ClientSession startSession() {
+        return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this);
+    }
+
+    @Override
+    public ClientSession startSession(final ClientSessionOptions options) {
+        return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession(options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this);
+    }
+
+    @Override
+    public MongoIterable<String> listDatabaseNames() {
+        return listDatabases(BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue());
+    }
+
+    @Override
+    public MongoIterable<String> listDatabaseNames(final ClientSession clientSession) {
+        return listDatabases(clientSession, BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue());
+    }
+
+    @Override
+    public ListDatabasesIterable<Document> listDatabases() {
+        return new SyncListDatabasesIterable<>(wrapped.listDatabases());
+    }
+
+    @Override
+    public ListDatabasesIterable<Document> listDatabases(final ClientSession clientSession) {
+        return listDatabases(clientSession, Document.class);
+    }
+
+    @Override
+    public <TResult> ListDatabasesIterable<TResult> listDatabases(final Class<TResult> resultClass) {
+        return new SyncListDatabasesIterable<>(wrapped.listDatabases(resultClass));
+    }
+
+    @Override
+    public <TResult> ListDatabasesIterable<TResult> listDatabases(final ClientSession clientSession, final Class<TResult> resultClass) {
+        return new SyncListDatabasesIterable<>(wrapped.listDatabases(unwrap(clientSession), resultClass));
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch() {
+        return new SyncChangeStreamIterable<>(wrapped.watch());
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final Class<TResult> resultClass) {
+        return new SyncChangeStreamIterable<>(wrapped.watch(resultClass));
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final List<? extends Bson> pipeline) {
+        return new SyncChangeStreamIterable<>(wrapped.watch(pipeline));
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, resultClass));
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession) {
+        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession)));
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final Class<TResult> resultClass) {
+        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), resultClass));
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
+        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline));
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final List<? extends Bson> pipeline,
+                                                         final Class<TResult> resultClass) {
+        return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass));
+    }
+
+    private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) {
+        return ((SyncClientSession) clientSession).getWrapped();
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java
index 64d94984b2e..922e07cc2d5 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java
@@ -59,6 +59,7 @@
 import reactor.core.publisher.Mono;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
 import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
@@ -102,6 +103,11 @@ public ReadConcern getReadConcern() {
         return wrapped.getReadConcern();
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return wrapped.getTimeout(timeUnit);
+    }
+
     @Override
     public <NewTDocument> MongoCollection<NewTDocument> withDocumentClass(final Class<NewTDocument> clazz) {
         return new SyncMongoCollection<>(wrapped.withDocumentClass(clazz));
@@ -127,6 +133,11 @@ public MongoCollection<T> withReadConcern(final ReadConcern readConcern) {
         return new SyncMongoCollection<>(wrapped.withReadConcern(readConcern));
     }
 
+    @Override
+    public MongoCollection<T> withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new SyncMongoCollection<>(wrapped.withTimeout(timeout, timeUnit));
+    }
+
     @Override
     public long countDocuments() {
         return requireNonNull(Mono.from(wrapped.countDocuments()).contextWrite(CONTEXT).block(TIMEOUT_DURATION));
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java
index 63485fba132..4e0159f90d0 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java
@@ -44,6 +44,7 @@
 import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
 import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
 import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorClose;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorError;
 import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorOpen;
 import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.isWaitForBatchCursorCreationEnabled;
 
@@ -91,6 +92,7 @@ public void onNext(final T t) {
             @Override
             public void onError(final Throwable t) {
                 results.addLast(t);
+                sleep(getSleepAfterCursorError());
             }
 
             @Override
@@ -155,6 +157,7 @@ public boolean hasNext() {
                 throw new MongoTimeoutException("Time out waiting for result from cursor");
             } else if (next instanceof Throwable) {
                 error = translateError((Throwable) next);
+                sleep(getSleepAfterCursorError());
                 throw error;
             } else if (next == COMPLETED) {
                 completed = true;
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java
index f1e6d125842..40b15632366 100644
--- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java
@@ -34,6 +34,7 @@
 import reactor.core.publisher.Mono;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
 import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
@@ -75,6 +76,11 @@ public ReadConcern getReadConcern() {
         return wrapped.getReadConcern();
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return wrapped.getTimeout(timeUnit);
+    }
+
     @Override
     public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) {
         return new SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry));
@@ -95,6 +101,11 @@ public MongoDatabase withReadConcern(final ReadConcern readConcern) {
         return new SyncMongoDatabase(wrapped.withReadConcern(readConcern));
     }
 
+    @Override
+    public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit));
+    }
+
     @Override
     public MongoCollection<Document> getCollection(final String collectionName) {
         return new SyncMongoCollection<>(wrapped.getCollection(collectionName));
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java
new file mode 100644
index 00000000000..b109931bedf
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.connection.TransportSettings;
+import com.mongodb.lang.Nullable;
+import com.mongodb.reactivestreams.client.MongoClients;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import reactor.core.publisher.Hooks;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.mongodb.client.ClientSideOperationTimeoutTest.skipOperationTimeoutTests;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableSleep;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorError;
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+
+
+// See https://github.com/mongodb/specifications/tree/master/source/client-side-operation-timeout/tests
+public class ClientSideOperationTimeoutTest extends UnifiedReactiveStreamsTest {
+
+    private final AtomicReference<Throwable> atomicReferenceThrowable = new AtomicReference<>();
+
+    private static Collection<Arguments> data() throws URISyntaxException, IOException {
+        return getTestData("unified-test-format/client-side-operation-timeout");
+    }
+
+    @Override
+    protected void skips(final String fileDescription, final String testDescription) {
+        skipOperationTimeoutTests(fileDescription, testDescription);
+
+        assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set"),
+                "No iterateOnce support. There is alternative prose test for it.");
+        assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if maxAwaitTimeMS is set"),
+                "No iterateOnce support. There is alternative prose test for it.");
+        /*
+           The Reactive Streams specification prevents us from allowing a subsequent next call (event in reactive terms) after a timeout error,
+           conflicting with the CSOT spec requirement not to invalidate the change stream and to try resuming and establishing a new change
+           stream on the server. We immediately let users know about a timeout error, which then closes the stream/publisher.
+         */
+        assumeFalse(testDescription.equals("change stream can be iterated again if previous iteration times out"),
+                "It is not possible due to a conflict with the Reactive Streams specification .");
+        assumeFalse(testDescription.equals("timeoutMS applies to full resume attempt in a next call"),
+                "Flaky and racy due to asynchronous behaviour. There is alternative prose test for it.");
+        assumeFalse(testDescription.equals("timeoutMS applied to initial aggregate"),
+                "No way to catch an error on BarchCursor creation. There is alternative prose test for it.");
+
+        assumeFalse(testDescription.endsWith("createChangeStream on client"));
+        assumeFalse(testDescription.endsWith("createChangeStream on database"));
+        assumeFalse(testDescription.endsWith("createChangeStream on collection"));
+
+        // No withTransaction support
+        assumeFalse(fileDescription.contains("withTransaction") || testDescription.contains("withTransaction"));
+
+        if (testDescription.equals("timeoutMS is refreshed for close")) {
+            enableSleepAfterCursorError(256);
+        }
+
+        /*
+         * The test is occasionally racy. The "killCursors" command may appear as an additional event. This is unexpected in unified tests,
+         * but anticipated in reactive streams because an operation timeout error triggers the closure of the stream/publisher.
+         */
+        ignoreExtraCommandEvents(testDescription.contains("timeoutMS is refreshed for getMore - failure"));
+
+        Hooks.onOperatorDebug();
+        Hooks.onErrorDropped(atomicReferenceThrowable::set);
+    }
+
+    @ParameterizedTest(name = "{0}: {1}")
+    @MethodSource("data")
+    @Override
+    public void shouldPassAllOutcomes(
+            @Nullable final String fileDescription,
+            @Nullable final String testDescription,
+            final String schemaVersion,
+            @Nullable final BsonArray runOnRequirements,
+            final BsonArray entitiesArray,
+            final BsonArray initialData,
+            final BsonDocument definition) {
+        try {
+            super.shouldPassAllOutcomes(fileDescription,
+                    testDescription,
+                    schemaVersion,
+                    runOnRequirements,
+                    entitiesArray,
+                    initialData,
+                    definition);
+
+        } catch (AssertionError e) {
+            assertNoDroppedError(format("%s failed due to %s.\n"
+                            + "The test also caused a dropped error; `onError` called with no handler.",
+                    testDescription, e.getMessage()));
+            if (racyTestAssertion(testDescription, e)) {
+                // Ignore failure - the test is racy; there is often no time to do the getMore
+                return;
+            }
+            throw e;
+        }
+        assertNoDroppedError(format("%s passed but there was a dropped error; `onError` called with no handler.", testDescription));
+    }
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        TransportSettings overriddenTransportSettings = ClusterFixture.getOverriddenTransportSettings();
+        MongoClientSettings clientSettings = overriddenTransportSettings == null ? settings
+                : MongoClientSettings.builder(settings).transportSettings(overriddenTransportSettings).build();
+        return new SyncMongoClient(MongoClients.create(clientSettings));
+    }
+
+    @AfterEach
+    public void cleanUp() {
+        super.cleanUp();
+        disableSleep();
+        Hooks.resetOnOperatorDebug();
+        Hooks.resetOnErrorDropped();
+    }
+
+    public static boolean racyTestAssertion(final String testDescription, final AssertionError e) {
+        return RACY_GET_MORE_TESTS.contains(testDescription) && e.getMessage().startsWith("Number of events must be the same");
+    }
+
+    private static final List<String> RACY_GET_MORE_TESTS = asList(
+            "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime",
+            "remaining timeoutMS applied to getMore if timeoutMode is unset");
+
+    private void assertNoDroppedError(final String message) {
+        Throwable droppedError = atomicReferenceThrowable.get();
+        if (droppedError != null) {
+            throw new AssertionError(message, droppedError);
+        }
+    }
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java
index 17fb4479e8c..cfbf5a0a5b8 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java
@@ -42,7 +42,7 @@
 import static java.lang.String.format;
 import static java.util.Arrays.asList;
 import static java.util.Collections.singletonList;
-import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
@@ -77,17 +77,17 @@ void shouldBuildTheExpectedOperation() {
                 .collation(COLLATION)
                 .comment("my comment")
                 .hint(BsonDocument.parse("{a: 1}"))
-                .maxAwaitTime(20, SECONDS)
-                .maxTime(10, SECONDS);
+                .maxAwaitTime(1001, MILLISECONDS)
+                .maxTime(101, MILLISECONDS);
 
-        expectedOperation
+        expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline,
+                getDefaultCodecRegistry().get(Document.class))
+                .retryReads(true)
                 .allowDiskUse(true)
                 .batchSize(100)
                 .collation(COLLATION)
                 .comment(new BsonString("my comment"))
-                .hint(BsonDocument.parse("{a: 1}"))
-                .maxAwaitTime(20, SECONDS)
-                .maxTime(10, SECONDS);
+                .hint(BsonDocument.parse("{a: 1}"));
 
         Flux.from(publisher).blockFirst();
         assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
@@ -104,7 +104,7 @@ void shouldBuildTheExpectedOperationForHintString() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
         AggregateOperation<Document> expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline,
-                getDefaultCodecRegistry().get(Document.class))
+                                                                                  getDefaultCodecRegistry().get(Document.class))
                 .batchSize(Integer.MAX_VALUE)
                 .retryReads(true);
 
@@ -128,7 +128,7 @@ void shouldBuildTheExpectedOperationForHintPlusHintString() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
         AggregateOperation<Document> expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline,
-                getDefaultCodecRegistry().get(Document.class))
+                                                                                  getDefaultCodecRegistry().get(Document.class))
                 .batchSize(Integer.MAX_VALUE)
                 .retryReads(true);
 
@@ -156,8 +156,7 @@ void shouldBuildTheExpectedOperationsForDollarOut() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
         AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
-                                                                                              ReadConcern.DEFAULT,
-                                                                                              WriteConcern.ACKNOWLEDGED);
+                                                                                              ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
 
         // default input should be as expected
         Flux.from(publisher).blockFirst();
@@ -174,16 +173,16 @@ void shouldBuildTheExpectedOperationsForDollarOut() {
                 .collation(COLLATION)
                 .comment("my comment")
                 .hint(BsonDocument.parse("{a: 1}"))
-                .maxAwaitTime(20, SECONDS) // Ignored on $out
-                .maxTime(10, SECONDS);
+                .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out
+                .maxTime(100, MILLISECONDS);
 
-        expectedOperation
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED)
                 .allowDiskUse(true)
                 .bypassDocumentValidation(true)
                 .collation(COLLATION)
                 .comment(new BsonString("my comment"))
-                .hint(BsonDocument.parse("{a: 1}"))
-                .maxTime(10, SECONDS);
+                .hint(BsonDocument.parse("{a: 1}"));
 
         Flux.from(publisher).blockFirst();
         assertEquals(ReadPreference.primary(), executor.getReadPreference());
@@ -195,8 +194,6 @@ void shouldBuildTheExpectedOperationsForDollarOut() {
                         .batchSize(100)
                         .collation(COLLATION)
                         .filter(new BsonDocument())
-                        .maxAwaitTime(0, SECONDS)
-                        .maxTime(0, SECONDS)
                         .comment(new BsonString("my comment"))
                         .retryReads(true);
 
@@ -205,7 +202,8 @@ void shouldBuildTheExpectedOperationsForDollarOut() {
         // Should handle database level aggregations
         publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE);
 
-        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                                                               WriteConcern.ACKNOWLEDGED);
 
         Flux.from(publisher).blockFirst();
         operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
@@ -215,7 +213,8 @@ void shouldBuildTheExpectedOperationsForDollarOut() {
         // Should handle toCollection
         publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
-        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                                                               WriteConcern.ACKNOWLEDGED);
 
         // default input should be as expected
         Flux.from(publisher.toCollection()).blockFirst();
@@ -235,8 +234,7 @@ void shouldBuildTheExpectedOperationsForDollarOutWithHintString() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
         AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
-                ReadConcern.DEFAULT,
-                WriteConcern.ACKNOWLEDGED);
+                                                                                              ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
 
         publisher
                 .hintString("x_1");
@@ -263,8 +261,7 @@ void shouldBuildTheExpectedOperationsForDollarOutWithHintPlusHintString() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
         AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
-                ReadConcern.DEFAULT,
-                WriteConcern.ACKNOWLEDGED);
+                                                                                              ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
 
         publisher
                 .hint(new Document("x", 1))
@@ -296,8 +293,8 @@ void shouldBuildTheExpectedOperationsForDollarOutAsDocument() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION)
                         .toCollection();
 
-        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
-                                                                                              WriteConcern.ACKNOWLEDGED);
+        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                                                                                              ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
 
         Flux.from(toCollectionPublisher).blockFirst();
         assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
@@ -337,8 +334,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
         AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
-                                                                                              ReadConcern.DEFAULT,
-                                                                                              WriteConcern.ACKNOWLEDGED);
+                                                                                              ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
 
         // default input should be as expected
         Flux.from(publisher).blockFirst();
@@ -355,16 +351,16 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() {
                 .collation(COLLATION)
                 .comment(new BsonInt32(1))
                 .hint(BsonDocument.parse("{a: 1}"))
-                .maxAwaitTime(20, SECONDS) // Ignored on $out
-                .maxTime(10, SECONDS);
+                .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out
+                .maxTime(100, MILLISECONDS);
 
-        expectedOperation
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED)
                 .allowDiskUse(true)
                 .bypassDocumentValidation(true)
                 .collation(COLLATION)
                 .comment(new BsonInt32(1))
-                .hint(BsonDocument.parse("{a: 1}"))
-                .maxTime(10, SECONDS);
+                .hint(BsonDocument.parse("{a: 1}"));
 
         Flux.from(publisher).blockFirst();
         assertEquals(ReadPreference.primary(), executor.getReadPreference());
@@ -376,8 +372,6 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() {
                         .batchSize(100)
                         .collation(COLLATION)
                         .filter(new BsonDocument())
-                        .maxAwaitTime(0, SECONDS)
-                        .maxTime(0, SECONDS)
                         .comment(new BsonInt32(1))
                         .retryReads(true);
 
@@ -386,7 +380,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() {
         // Should handle database level aggregations
         publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE);
 
-        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                                                               WriteConcern.ACKNOWLEDGED);
 
         Flux.from(publisher).blockFirst();
         operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
@@ -396,7 +391,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() {
         // Should handle toCollection
         publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
-        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                                                               WriteConcern.ACKNOWLEDGED);
 
         // default input should be as expected
         Flux.from(publisher.toCollection()).blockFirst();
@@ -416,8 +412,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeString() {
                 new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
 
         AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
-                ReadConcern.DEFAULT,
-                WriteConcern.ACKNOWLEDGED);
+                                                                                              ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
 
         // default input should be as expected
         Flux.from(publisher).blockFirst();
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java
index d8a0083173c..7c2ab637c27 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java
@@ -40,7 +40,7 @@
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
 import static java.util.Arrays.asList;
 import static java.util.Collections.singletonList;
-import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
@@ -57,7 +57,8 @@ void shouldBuildTheExpectedOperation() {
                                                                                     Document.class, pipeline, ChangeStreamLevel.COLLECTION);
 
         ChangeStreamOperation<ChangeStreamDocument<Document>> expectedOperation =
-                new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, codec)
+                new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline,
+                        codec)
                         .batchSize(Integer.MAX_VALUE)
                         .retryReads(true);
 
@@ -72,16 +73,17 @@ void shouldBuildTheExpectedOperation() {
                 .batchSize(100)
                 .collation(COLLATION)
                 .comment("comment")
-                .maxAwaitTime(20, SECONDS)
+                .maxAwaitTime(101, MILLISECONDS)
                 .fullDocument(FullDocument.UPDATE_LOOKUP);
 
-        expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline,
-                codec).retryReads(true);
+        expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.UPDATE_LOOKUP,
+                FullDocumentBeforeChange.DEFAULT,
+                pipeline,
+                                                        codec).retryReads(true);
         expectedOperation
                 .batchSize(100)
                 .collation(COLLATION)
-                .comment(new BsonString("comment"))
-                .maxAwaitTime(20, SECONDS);
+                .comment(new BsonString("comment"));
 
         Flux.from(publisher).blockFirst();
         assertEquals(ReadPreference.primary(), executor.getReadPreference());
@@ -103,7 +105,7 @@ void shouldBuildTheExpectedOperationWhenSettingDocumentClass() {
 
         ChangeStreamOperation<BsonDocument> expectedOperation =
                 new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline,
-                        getDefaultCodecRegistry().get(BsonDocument.class))
+                                            getDefaultCodecRegistry().get(BsonDocument.class))
                         .batchSize(batchSize)
                         .comment(new BsonInt32(1))
                         .retryReads(true);
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy
index 4879fa19466..d6233342291 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy
@@ -23,7 +23,6 @@ import com.mongodb.async.FutureResultCallback
 import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerType
-import com.mongodb.internal.IgnorableRequestContext
 import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding
 import com.mongodb.internal.binding.AsyncClusterBinding
 import com.mongodb.internal.binding.AsyncConnectionSource
@@ -34,15 +33,19 @@ import com.mongodb.internal.session.ClientSessionContext
 import com.mongodb.reactivestreams.client.ClientSession
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
 class ClientSessionBindingSpecification extends Specification {
     def 'should return the session context from the binding'() {
         given:
         def session = Stub(ClientSession)
-        def wrappedBinding = Stub(AsyncClusterAwareReadWriteBinding)
+        def wrappedBinding = Stub(AsyncClusterAwareReadWriteBinding) {
+            getOperationContext() >> OPERATION_CONTEXT
+        }
         def binding = new ClientSessionBinding(session, false, wrappedBinding)
 
         when:
-        def context = binding.getSessionContext()
+        def context = binding.getOperationContext().getSessionContext()
 
         then:
         (context as ClientSessionContext).getClientSession() == session
@@ -51,7 +54,9 @@ class ClientSessionBindingSpecification extends Specification {
     def 'should return the session context from the connection source'() {
         given:
         def session = Stub(ClientSession)
-        def wrappedBinding = Mock(AsyncClusterAwareReadWriteBinding)
+        def wrappedBinding = Mock(AsyncClusterAwareReadWriteBinding) {
+            getOperationContext() >> OPERATION_CONTEXT
+        }
         wrappedBinding.retain() >> wrappedBinding
         def binding = new ClientSessionBinding(session, false, wrappedBinding)
 
@@ -65,7 +70,7 @@ class ClientSessionBindingSpecification extends Specification {
         }
 
         when:
-        def context = futureResultCallback.get().getSessionContext()
+        def context = futureResultCallback.get().getOperationContext().getSessionContext()
 
         then:
         (context as ClientSessionContext).getClientSession() == session
@@ -80,7 +85,7 @@ class ClientSessionBindingSpecification extends Specification {
         }
 
         when:
-        context = futureResultCallback.get().getSessionContext()
+        context = futureResultCallback.get().getOperationContext().getSessionContext()
 
         then:
         (context as ClientSessionContext).getClientSession() == session
@@ -166,7 +171,7 @@ class ClientSessionBindingSpecification extends Specification {
         def binding = new ClientSessionBinding(session, ownsSession, wrappedBinding)
 
         then:
-        binding.getSessionContext().isImplicitSession() == ownsSession
+        binding.getOperationContext().getSessionContext().isImplicitSession() == ownsSession
 
         where:
         ownsSession << [true, false]
@@ -182,6 +187,6 @@ class ClientSessionBindingSpecification extends Specification {
                         .build()), null)
             }
         }
-        new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, null, IgnorableRequestContext.INSTANCE)
+        new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)
     }
 }
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java
index 62a7596a681..eab28373f2a 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java
@@ -34,7 +34,6 @@
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
 import static java.util.Arrays.asList;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class FindPublisherImplTest extends TestHelper {
@@ -50,7 +49,8 @@ void shouldBuildTheExpectedOperation() {
         TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
         FindPublisher<Document> publisher = new FindPublisherImpl<>(null, createMongoOperationPublisher(executor), new Document());
 
-        FindOperation<Document> expectedOperation = new FindOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class))
+        FindOperation<Document> expectedOperation = new FindOperation<>(NAMESPACE,
+                                                                        getDefaultCodecRegistry().get(Document.class))
                 .batchSize(Integer.MAX_VALUE)
                 .retryReads(true)
                 .filter(new BsonDocument());
@@ -66,8 +66,8 @@ void shouldBuildTheExpectedOperation() {
                 .filter(new Document("filter", 1))
                 .sort(Sorts.ascending("sort"))
                 .projection(new Document("projection", 1))
-                .maxTime(10, SECONDS)
-                .maxAwaitTime(20, SECONDS)
+                .maxTime(101, MILLISECONDS)
+                .maxAwaitTime(1001, MILLISECONDS)
                 .batchSize(100)
                 .limit(100)
                 .skip(10)
@@ -83,7 +83,10 @@ void shouldBuildTheExpectedOperation() {
                 .showRecordId(false)
                 .allowDiskUse(false);
 
-        expectedOperation
+        expectedOperation = new FindOperation<>(NAMESPACE,
+                                                getDefaultCodecRegistry().get(Document.class))
+                .retryReads(true)
+                .filter(new BsonDocument())
                 .allowDiskUse(false)
                 .batchSize(100)
                 .collation(COLLATION)
@@ -93,8 +96,6 @@ void shouldBuildTheExpectedOperation() {
                 .hint(new BsonString("a_1"))
                 .limit(100)
                 .max(new BsonDocument("max", new BsonInt32(1)))
-                .maxAwaitTime(20000, MILLISECONDS)
-                .maxTime(10000, MILLISECONDS)
                 .min(new BsonDocument("min", new BsonInt32(1)))
                 .projection(new BsonDocument("projection", new BsonInt32(1)))
                 .returnKey(false)
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java
index 36891f1031f..6613723b49d 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java
@@ -35,6 +35,7 @@ final class ListCollectionNamesPublisherImplTest extends TestHelper {
 
     private static final String DATABASE_NAME = NAMESPACE.getDatabaseName();
 
+    @SuppressWarnings("deprecation")
     @DisplayName("Should build the expected ListCollectionsOperation")
     @Test
     void shouldBuildTheExpectedOperation() {
@@ -45,7 +46,7 @@ void shouldBuildTheExpectedOperation() {
                 .authorizedCollections(true);
 
         ListCollectionsOperation<Document> expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME,
-                                                                                            getDefaultCodecRegistry().get(Document.class))
+                                                                                              getDefaultCodecRegistry().get(Document.class))
                 .batchSize(Integer.MAX_VALUE)
                 .nameOnly(true)
                 .authorizedCollections(true)
@@ -63,9 +64,12 @@ void shouldBuildTheExpectedOperation() {
                 .maxTime(10, SECONDS)
                 .batchSize(100);
 
-        expectedOperation
+        expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME,
+                                                           getDefaultCodecRegistry().get(Document.class))
+                .nameOnly(true)
+                .authorizedCollections(true)
+                .retryReads(true)
                 .filter(new BsonDocument("filter", new BsonInt32(1)))
-                .maxTime(10, SECONDS)
                 .batchSize(100);
 
         Flux.from(publisher).blockFirst();
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java
index c875ab7973c..a632edbae82 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java
@@ -28,7 +28,7 @@
 
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
 import static java.util.Arrays.asList;
-import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class ListCollectionsPublisherImplTest extends TestHelper {
@@ -56,12 +56,14 @@ void shouldBuildTheExpectedOperation() {
         // Should apply settings
         publisher
                 .filter(new Document("filter", 1))
-                .maxTime(10, SECONDS)
+                .maxTime(100, MILLISECONDS)
                 .batchSize(100);
 
-        expectedOperation
+        expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME,
+                                                           getDefaultCodecRegistry().get(String.class))
+                .nameOnly(true)
+                .retryReads(true)
                 .filter(new BsonDocument("filter", new BsonInt32(1)))
-                .maxTime(10, SECONDS)
                 .batchSize(100);
 
         Flux.from(publisher).blockFirst();
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java
index 749f11b8e0a..c19a56f14cc 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java
@@ -28,7 +28,7 @@
 
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
 import static java.util.Arrays.asList;
-import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class ListDatabasesPublisherImplTest extends TestHelper {
@@ -41,7 +41,8 @@ void shouldBuildTheExpectedOperation() {
         TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
         ListDatabasesPublisher<Document> publisher = new ListDatabasesPublisherImpl<>(null, createMongoOperationPublisher(executor));
 
-        ListDatabasesOperation<Document> expectedOperation = new ListDatabasesOperation<>(getDefaultCodecRegistry().get(Document.class))
+        ListDatabasesOperation<Document> expectedOperation = new ListDatabasesOperation<>(
+                getDefaultCodecRegistry().get(Document.class))
                 .retryReads(true);
 
         // default input should be as expected
@@ -54,13 +55,14 @@ void shouldBuildTheExpectedOperation() {
         publisher
                 .authorizedDatabasesOnly(true)
                 .filter(new Document("filter", 1))
-                .maxTime(10, SECONDS)
+                .maxTime(100, MILLISECONDS)
                 .batchSize(100);
 
-        expectedOperation
+        expectedOperation = new ListDatabasesOperation<>(
+                getDefaultCodecRegistry().get(Document.class))
+                .retryReads(true)
                 .authorizedDatabasesOnly(true)
-                .filter(new BsonDocument("filter", new BsonInt32(1)))
-                .maxTime(10, SECONDS);
+                .filter(new BsonDocument("filter", new BsonInt32(1)));
 
         configureBatchCursor();
         Flux.from(publisher).blockFirst();
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java
index 1929c4c3476..5ae221b8a02 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java
@@ -27,7 +27,7 @@
 
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
 import static java.util.Arrays.asList;
-import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class ListIndexesPublisherImplTest extends TestHelper {
@@ -54,13 +54,13 @@ void shouldBuildTheExpectedOperation() {
         assertEquals(ReadPreference.primary(), executor.getReadPreference());
 
         // Should apply settings
-        publisher
-                .batchSize(100)
-                .maxTime(10, SECONDS);
+        publisher.batchSize(100)
+                .maxTime(100, MILLISECONDS);
 
-        expectedOperation
-                .batchSize(100)
-                .maxTime(10, SECONDS);
+        expectedOperation =
+                new ListIndexesOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class))
+                        .batchSize(100)
+                        .retryReads(true);
 
         configureBatchCursor();
         Flux.from(publisher).blockFirst();
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java
index 451772e5751..c112395a818 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java
@@ -36,7 +36,7 @@
 
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
 import static java.util.Arrays.asList;
-import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -57,9 +57,9 @@ void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() {
         com.mongodb.reactivestreams.client.MapReducePublisher<Document> publisher =
                 new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION);
 
-        MapReduceWithInlineResultsOperation<Document> expectedOperation =
-                new MapReduceWithInlineResultsOperation<>(NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION),
-                                                          getDefaultCodecRegistry().get(Document.class)).verbose(true);
+        MapReduceWithInlineResultsOperation<Document> expectedOperation = new MapReduceWithInlineResultsOperation<>(
+                NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION),
+                getDefaultCodecRegistry().get(Document.class)).verbose(true);
 
         // default input should be as expected
         Flux.from(publisher).blockFirst();
@@ -78,19 +78,19 @@ void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() {
                 .filter(new Document("filter", 1))
                 .finalizeFunction(FINALIZE_FUNCTION)
                 .limit(999)
-                .maxTime(10, SECONDS)
+                .maxTime(100, MILLISECONDS)
                 .scope(new Document("scope", 1))
                 .sort(Sorts.ascending("sort"))
                 .verbose(false);
 
-        expectedOperation
-                .collation(COLLATION)
+        expectedOperation = new MapReduceWithInlineResultsOperation<>(
+                NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION),
+                getDefaultCodecRegistry().get(Document.class))
+                .verbose(true)
                 .collation(COLLATION)
                 .filter(BsonDocument.parse("{filter: 1}"))
                 .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION))
                 .limit(999)
-                .maxTime(10, SECONDS)
-                .maxTime(10, SECONDS)
                 .scope(new BsonDocument("scope", new BsonInt32(1)))
                 .sort(new BsonDocument("sort", new BsonInt32(1)))
                 .verbose(false);
@@ -114,9 +114,7 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() {
                         .collectionName(NAMESPACE.getCollectionName());
 
         MapReduceToCollectionOperation expectedOperation = new MapReduceToCollectionOperation(NAMESPACE,
-                                                                                              new BsonJavaScript(MAP_FUNCTION),
-                                                                                              new BsonJavaScript(REDUCE_FUNCTION),
-                                                                                              NAMESPACE.getCollectionName(),
+                                                                                              new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(),
                                                                                               WriteConcern.ACKNOWLEDGED).verbose(true);
 
         // default input should be as expected
@@ -131,19 +129,19 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() {
                 .filter(new Document("filter", 1))
                 .finalizeFunction(FINALIZE_FUNCTION)
                 .limit(999)
-                .maxTime(10, SECONDS)
+                .maxTime(100, MILLISECONDS)
                 .scope(new Document("scope", 1))
                 .sort(Sorts.ascending("sort"))
                 .verbose(false);
 
-        expectedOperation
+        expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, new BsonJavaScript(MAP_FUNCTION),
+                                                               new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED)
+                .verbose(true)
                 .collation(COLLATION)
                 .bypassDocumentValidation(true)
                 .filter(BsonDocument.parse("{filter: 1}"))
                 .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION))
                 .limit(999)
-                .maxTime(10, SECONDS)
-                .maxTime(10, SECONDS)
                 .scope(new BsonDocument("scope", new BsonInt32(1)))
                 .sort(new BsonDocument("sort", new BsonInt32(1)))
                 .verbose(false);
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java
new file mode 100644
index 00000000000..b79d3a645d9
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.TransactionOptions;
+import com.mongodb.WriteConcern;
+import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.internal.connection.Cluster;
+import com.mongodb.internal.session.ServerSessionPool;
+import com.mongodb.reactivestreams.client.ChangeStreamPublisher;
+import com.mongodb.reactivestreams.client.ClientSession;
+import com.mongodb.reactivestreams.client.ListDatabasesPublisher;
+import com.mongodb.reactivestreams.client.MongoCluster;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistries;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mock;
+import reactor.core.publisher.Mono;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+
+
+public class MongoClusterImplTest extends TestHelper {
+
+    @Mock
+    private ClientSession clientSession;
+
+    private final MongoClusterImpl mongoCluster = createMongoCluster();
+    private final MongoOperationPublisher<Document> mongoOperationPublisher = mongoCluster.getMongoOperationPublisher();
+
+    @Test
+    public void withCodecRegistry() {
+        // Cannot do equality test as registries are wrapped
+        CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec());
+        MongoCluster newMongoCluster = mongoCluster.withCodecRegistry(codecRegistry);
+        assertTrue(newMongoCluster.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec);
+    }
+
+    @Test
+    public void withReadConcern() {
+        assertEquals(ReadConcern.AVAILABLE, mongoCluster.withReadConcern(ReadConcern.AVAILABLE).getReadConcern());
+    }
+
+    @Test
+    public void withReadPreference() {
+        assertEquals(ReadPreference.secondaryPreferred(), mongoCluster.withReadPreference(ReadPreference.secondaryPreferred())
+                .getReadPreference());
+    }
+
+    @Test
+    public void withTimeout() {
+        assertEquals(1000, mongoCluster.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS));
+    }
+
+    @Test
+    public void withWriteConcern() {
+        assertEquals(WriteConcern.MAJORITY, mongoCluster.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern());
+    }
+
+    @Test
+    void testListDatabases() {
+        assertAll("listDatabases",
+                  () -> assertAll("check validation",
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabases((Class<?>) null)),
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabases((ClientSession) null)),
+                                  () -> assertThrows(IllegalArgumentException.class,
+                                                     () -> mongoCluster.listDatabases(clientSession, null))),
+                  () -> {
+                      ListDatabasesPublisher<Document> expected =
+                              new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(), "Default");
+                  },
+                  () -> {
+                      ListDatabasesPublisher<Document> expected =
+                              new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(clientSession), "With session");
+                  },
+                  () -> {
+                      ListDatabasesPublisher<BsonDocument> expected =
+                              new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher
+                                      .withDocumentClass(BsonDocument.class));
+                      assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(BsonDocument.class), "Alternative class");
+                  },
+                  () -> {
+                      ListDatabasesPublisher<BsonDocument> expected =
+                              new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher
+                                      .withDocumentClass(BsonDocument.class));
+                      assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(clientSession, BsonDocument.class),
+                                                 "Alternative class with session");
+                  }
+        );
+    }
+
+    @Test
+    void testListDatabaseNames() {
+        assertAll("listDatabaseNames",
+                  () -> assertAll("check validation",
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabaseNames(null))),
+                  () -> {
+                      ListDatabasesPublisher<Document> expected =
+                              new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher).nameOnly(true);
+
+                      assertPublisherIsTheSameAs(expected, mongoCluster.listDatabaseNames(), "Default");
+                  },
+                  () -> {
+                      ListDatabasesPublisher<Document> expected =
+                              new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher).nameOnly(true);
+
+                      assertPublisherIsTheSameAs(expected, mongoCluster.listDatabaseNames(clientSession), "With session");
+                  }
+        );
+    }
+
+    @Test
+    void testWatch() {
+        List<Bson> pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}"));
+        assertAll("watch",
+                  () -> assertAll("check validation",
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((Class<?>) null)),
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((List<Bson>) null)),
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch(pipeline, null)),
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((ClientSession) null)),
+                                  () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch(null, pipeline)),
+                                  () -> assertThrows(IllegalArgumentException.class,
+                                                     () -> mongoCluster.watch(null, pipeline, Document.class))
+                  ),
+                  () -> {
+                      ChangeStreamPublisher<Document> expected =
+                              new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"),
+                                                              Document.class, emptyList(), ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(), "Default");
+                  },
+                  () -> {
+                      ChangeStreamPublisher<Document> expected =
+                              new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"),
+                                                              Document.class, pipeline, ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(pipeline), "With pipeline");
+                  },
+                  () -> {
+                      ChangeStreamPublisher<BsonDocument> expected =
+                              new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"),
+                                                              BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(BsonDocument.class),
+                                                 "With result class");
+                  },
+                  () -> {
+                      ChangeStreamPublisher<BsonDocument> expected =
+                              new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"),
+                                                              BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(pipeline, BsonDocument.class),
+                                                 "With pipeline & result class");
+                  },
+                  () -> {
+                      ChangeStreamPublisher<Document> expected =
+                              new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"),
+                                                              Document.class, emptyList(), ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession), "with session");
+                  },
+                  () -> {
+                      ChangeStreamPublisher<Document> expected =
+                              new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"),
+                                                              Document.class, pipeline, ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, pipeline), "With session & pipeline");
+                  },
+                  () -> {
+                      ChangeStreamPublisher<BsonDocument> expected =
+                              new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"),
+                                                              BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, BsonDocument.class),
+                                                 "With session & resultClass");
+                  },
+                  () -> {
+                      ChangeStreamPublisher<BsonDocument> expected =
+                              new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"),
+                                                              BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT);
+                      assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, pipeline, BsonDocument.class),
+                                                 "With clientSession, pipeline & result class");
+                  }
+        );
+    }
+
+    @Test
+    void testStartSession() {
+        MongoClusterImpl mongoCluster = createMongoCluster();
+
+        // Validation
+        assertThrows(IllegalArgumentException.class, () -> mongoCluster.startSession(null));
+
+        // Default
+        Mono<ClientSession> expected = mongoCluster.getClientSessionHelper()
+                .createClientSessionMono(ClientSessionOptions.builder().build(), OPERATION_EXECUTOR);
+        assertPublisherIsTheSameAs(expected, mongoCluster.startSession(), "Default");
+
+        // with options
+        ClientSessionOptions options = ClientSessionOptions.builder()
+                .causallyConsistent(true)
+                .defaultTransactionOptions(TransactionOptions.builder().readConcern(ReadConcern.LINEARIZABLE).build())
+                .build();
+        expected = mongoCluster.getClientSessionHelper().createClientSessionMono(options, OPERATION_EXECUTOR);
+        assertPublisherIsTheSameAs(expected, mongoCluster.startSession(options), "with options");
+
+    }
+
+    private MongoClusterImpl createMongoCluster() {
+        return new MongoClusterImpl(mock(Cluster.class), null, OPERATION_EXECUTOR, mock(ServerSessionPool.class),
+                mock(ClientSessionHelper.class), OPERATION_PUBLISHER);
+    }
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java
index 1cd31102611..97b7bbf0d78 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java
@@ -18,6 +18,9 @@
 
 import com.mongodb.CreateIndexCommitQuorum;
 import com.mongodb.MongoNamespace;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
 import com.mongodb.bulk.BulkWriteResult;
 import com.mongodb.client.model.BulkWriteOptions;
 import com.mongodb.client.model.Collation;
@@ -52,8 +55,11 @@
 import com.mongodb.reactivestreams.client.DistinctPublisher;
 import com.mongodb.reactivestreams.client.FindPublisher;
 import com.mongodb.reactivestreams.client.ListIndexesPublisher;
+import com.mongodb.reactivestreams.client.MongoCollection;
 import org.bson.BsonDocument;
 import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistries;
+import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
 import org.junit.jupiter.api.Test;
 import org.mockito.Mock;
@@ -65,7 +71,9 @@
 import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonList;
 import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 public class MongoCollectionImplTest extends TestHelper {
@@ -80,6 +88,40 @@ public class MongoCollectionImplTest extends TestHelper {
     private final List<Bson> pipeline = singletonList(filter);
     private final Collation collation = Collation.builder().locale("de").build();
 
+    @Test
+    public void withDocumentClass() {
+        assertEquals(BsonDocument.class, collection.withDocumentClass(BsonDocument.class).getDocumentClass());
+    }
+
+    @Test
+    public void withCodecRegistry() {
+        // Cannot do equality test as registries are wrapped
+        CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec());
+        MongoCollection<Document> newCollection = collection.withCodecRegistry(codecRegistry);
+        assertTrue(newCollection.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec);
+    }
+
+    @Test
+    public void withReadConcern() {
+        assertEquals(ReadConcern.AVAILABLE, collection.withReadConcern(ReadConcern.AVAILABLE).getReadConcern());
+    }
+
+    @Test
+    public void withReadPreference() {
+        assertEquals(ReadPreference.secondaryPreferred(), collection.withReadPreference(ReadPreference.secondaryPreferred())
+                .getReadPreference());
+    }
+
+    @Test
+    public void withTimeout() {
+        assertEquals(1000, collection.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS));
+    }
+
+    @Test
+    public void withWriteConcern() {
+        assertEquals(WriteConcern.MAJORITY, collection.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern());
+    }
+
     @Test
     void testAggregate() {
         assertAll("Aggregate tests",
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java
index 77be004edda..f50e44a7db6 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java
@@ -16,7 +16,9 @@
 
 package com.mongodb.reactivestreams.client.internal;
 
+import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
 import com.mongodb.client.model.Collation;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.CreateViewOptions;
@@ -27,19 +29,25 @@
 import com.mongodb.reactivestreams.client.ClientSession;
 import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher;
 import com.mongodb.reactivestreams.client.ListCollectionsPublisher;
+import com.mongodb.reactivestreams.client.MongoDatabase;
 import org.bson.BsonDocument;
 import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistries;
+import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
 import org.junit.jupiter.api.Test;
 import org.mockito.Mock;
 import org.reactivestreams.Publisher;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonList;
 import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 public class MongoDatabaseImplTest extends TestHelper {
@@ -49,6 +57,35 @@ public class MongoDatabaseImplTest extends TestHelper {
     private final MongoDatabaseImpl database = new MongoDatabaseImpl(OPERATION_PUBLISHER.withDatabase("db"));
     private final MongoOperationPublisher<Document> mongoOperationPublisher = database.getMongoOperationPublisher();
 
+    @Test
+    public void withCodecRegistry() {
+        // Cannot do equality test as registries are wrapped
+        CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec());
+        MongoDatabase newDatabase = database.withCodecRegistry(codecRegistry);
+        assertTrue(newDatabase.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec);
+    }
+
+    @Test
+    public void withReadConcern() {
+        assertEquals(ReadConcern.AVAILABLE, database.withReadConcern(ReadConcern.AVAILABLE).getReadConcern());
+    }
+
+    @Test
+    public void withReadPreference() {
+        assertEquals(ReadPreference.secondaryPreferred(), database.withReadPreference(ReadPreference.secondaryPreferred())
+                .getReadPreference());
+    }
+
+    @Test
+    public void withTimeout() {
+        assertEquals(1000, database.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS));
+    }
+
+    @Test
+    public void withWriteConcern() {
+        assertEquals(WriteConcern.MAJORITY, database.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern());
+    }
+
     @Test
     void testAggregate() {
         List<Bson> pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}"));
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java
new file mode 100644
index 00000000000..42d6bb14c5c
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.reactivestreams.client.internal;
+
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.UuidRepresentation;
+import org.bson.codecs.configuration.CodecRegistries;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+
+
+public class MongoOperationPublisherTest {
+
+    private static final OperationExecutor OPERATION_EXECUTOR;
+
+    static {
+        OPERATION_EXECUTOR = mock(OperationExecutor.class);
+        Mockito.lenient().doAnswer(invocation -> OPERATION_EXECUTOR)
+                .when(OPERATION_EXECUTOR)
+                .withTimeoutSettings(any());
+    }
+    private static final MongoNamespace MONGO_NAMESPACE = new MongoNamespace("a.b");
+
+    private static final MongoOperationPublisher<Document> DEFAULT_MOP = new MongoOperationPublisher<>(
+            MONGO_NAMESPACE, Document.class, MongoClientSettings.getDefaultCodecRegistry(), ReadPreference.primary(),
+            ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, true, UuidRepresentation.STANDARD,
+            null, TIMEOUT_SETTINGS_WITH_TIMEOUT, OPERATION_EXECUTOR);
+
+    @Test
+    public void withCodecRegistry() {
+        // Cannot do equality test as registries are wrapped
+        CodecRegistry codecRegistry = DEFAULT_MOP.withCodecRegistry(CodecRegistries.fromCodecs(new TestHelper.MyLongCodec())).getCodecRegistry();
+        assertTrue(codecRegistry.get(Long.class) instanceof TestHelper.MyLongCodec);
+    }
+
+    @Test
+    public void withDatabase() {
+        assertEquals(new MongoNamespace("c.ignored"), DEFAULT_MOP.withDatabase("c").getNamespace());
+    }
+
+    @Test
+    public void withDocumentClass() {
+        assertEquals(DEFAULT_MOP, DEFAULT_MOP.withDocumentClass(Document.class));
+        assertEquals(BsonDocument.class, DEFAULT_MOP.withDocumentClass(BsonDocument.class).getDocumentClass());
+    }
+
+    @Test
+    public void withDatabaseAndDocumentClass() {
+        MongoOperationPublisher<BsonDocument> alternative = DEFAULT_MOP.withDatabaseAndDocumentClass("c", BsonDocument.class);
+        assertEquals(BsonDocument.class, alternative.getDocumentClass());
+        assertEquals(new MongoNamespace("c.ignored"), alternative.getNamespace());
+    }
+
+    @Test
+    public void withNamespaceAndDocumentClass() {
+        assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class));
+
+        MongoOperationPublisher<BsonDocument> alternative = DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("c.d"),
+                BsonDocument.class);
+        assertEquals(BsonDocument.class, alternative.getDocumentClass());
+        assertEquals(new MongoNamespace("c.d"), alternative.getNamespace());
+    }
+
+
+    @Test
+    public void withNamespace() {
+        assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class));
+        assertEquals(new MongoNamespace("c.d"), DEFAULT_MOP.withNamespace(new MongoNamespace("c.d")).getNamespace());
+    }
+
+    @Test
+    public void withReadConcern() {
+        assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadConcern(ReadConcern.DEFAULT));
+        assertEquals(ReadConcern.AVAILABLE, DEFAULT_MOP.withReadConcern(ReadConcern.AVAILABLE).getReadConcern());
+    }
+
+    @Test
+    public void withReadPreference() {
+        assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadPreference(ReadPreference.primary()));
+        assertEquals(ReadPreference.secondaryPreferred(), DEFAULT_MOP.withReadPreference(ReadPreference.secondaryPreferred())
+                .getReadPreference());
+    }
+
+    @Test
+    public void withTimeout() {
+        assertEquals(DEFAULT_MOP, DEFAULT_MOP.withTimeout(60_000, TimeUnit.MILLISECONDS));
+        assertEquals(1000, DEFAULT_MOP.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeoutMS());
+        assertThrows(IllegalArgumentException.class, () -> DEFAULT_MOP.withTimeout(500, TimeUnit.NANOSECONDS));
+    }
+
+    @Test
+    public void withWriteConcern() {
+        assertEquals(DEFAULT_MOP, DEFAULT_MOP.withWriteConcern(WriteConcern.ACKNOWLEDGED));
+        assertEquals(WriteConcern.MAJORITY, DEFAULT_MOP.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern());
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java
index c293df899b4..46f4e86762b 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java
@@ -30,9 +30,14 @@
 import com.mongodb.internal.operation.AsyncWriteOperation;
 import com.mongodb.lang.NonNull;
 import com.mongodb.lang.Nullable;
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
 import org.bson.Document;
 import org.bson.UuidRepresentation;
 import org.bson.codecs.BsonValueCodecProvider;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
 import org.bson.codecs.configuration.CodecRegistry;
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.Mock;
@@ -52,8 +57,10 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Function;
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
 import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
 import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
 import static java.util.stream.Collectors.toList;
 import static java.util.stream.Collectors.toMap;
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
@@ -81,6 +88,9 @@ public class TestHelper {
 
     static {
         OperationExecutor executor = mock(OperationExecutor.class);
+        Mockito.lenient().doAnswer(invocation -> executor)
+                .when(executor).withTimeoutSettings(any());
+
         Mockito.lenient().doAnswer(invocation -> Mono.empty())
                 .when(executor)
                 .execute(any(), any(), any());
@@ -97,7 +107,7 @@ static MongoOperationPublisher<Document> createMongoOperationPublisher(final Ope
         return new MongoOperationPublisher<>(NAMESPACE, Document.class,
                                              getDefaultCodecRegistry(), ReadPreference.primary(), ReadConcern.DEFAULT,
                                              WriteConcern.ACKNOWLEDGED, true, true,
-                                             UuidRepresentation.STANDARD, null, executor);
+                                             UuidRepresentation.STANDARD, null, TIMEOUT_SETTINGS, executor);
     }
 
 
@@ -148,7 +158,10 @@ private static Map<String, Object> getClassGetterValues(final Object instance) {
     }
 
 
-    private static Map<String, Optional<Object>> getClassPrivateFieldValues(final Object instance) {
+    private static Map<String, Optional<Object>> getClassPrivateFieldValues(@Nullable final Object instance) {
+        if (instance == null) {
+            return emptyMap();
+        }
         return Arrays.stream(instance.getClass().getDeclaredFields())
                 .filter(field -> Modifier.isPrivate(field.getModifiers()))
                 .collect(toMap(Field::getName, field -> {
@@ -264,4 +277,21 @@ void configureBatchCursor() {
     public AsyncBatchCursor<Document> getBatchCursor() {
         return batchCursor;
     }
+
+    public static class MyLongCodec implements Codec<Long> {
+
+        @Override
+        public Long decode(final BsonReader reader, final DecoderContext decoderContext) {
+            return 42L;
+        }
+
+        @Override
+        public void encode(final BsonWriter writer, final Long value, final EncoderContext encoderContext) {
+        }
+
+        @Override
+        public Class<Long> getEncoderClass() {
+            return Long.class;
+        }
+    }
 }
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java
index 99c9642f8d6..6989d0b2d2e 100644
--- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.AsyncReadOperation;
 import com.mongodb.internal.operation.AsyncWriteOperation;
 import com.mongodb.lang.Nullable;
@@ -59,6 +60,16 @@ public <T> Mono<T> execute(final AsyncWriteOperation<T> operation, final ReadCon
         return createMono();
     }
 
+    @Override
+    public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) {
+        return this;
+    }
+
+    @Override
+    public TimeoutSettings getTimeoutSettings() {
+        throw new UnsupportedOperationException("Not supported");
+    }
+
     <T> Mono<T> createMono() {
         return Mono.create(sink -> {
            Object response = responses.remove(0);
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java
new file mode 100644
index 00000000000..01924c61f0e
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.reactivestreams.client.MongoCollection;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.internal.mockito.MongoMockito.mock;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutMono;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeout;
+import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.longThat;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoInteractions;
+import static org.mockito.Mockito.when;
+
+@SuppressWarnings("unchecked")
+class TimeoutHelperTest {
+
+    private static final String TIMEOUT_ERROR_MESSAGE = "message";
+    private static final String DEFAULT_TIMEOUT_ERROR_MESSAGE = "Operation exceeded the timeout limit.";
+
+    @Test
+    void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsNull() {
+        //given
+        MongoCollection<Document> collection = mock(MongoCollection.class);
+
+        //when
+        MongoCollection<Document> result = collectionWithTimeout(collection, null);
+        MongoCollection<Document> monoResult = collectionWithTimeoutMono(collection, null).block();
+        MongoCollection<Document> monoResultDeferred = collectionWithTimeoutDeferred(collection, null).block();
+
+        //then
+        assertEquals(collection, result);
+        assertEquals(collection, monoResult);
+        assertEquals(collection, monoResultDeferred);
+    }
+
+    @Test
+    void shouldNotSetRemainingTimeoutDatabaseWhenTimeoutIsNull() {
+        //given
+        MongoDatabase database = mock(MongoDatabase.class);
+
+        //when
+        MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, null);
+        MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, null).block();
+
+        //then
+        assertEquals(database, result);
+        assertEquals(database, monoResultDeferred);
+    }
+
+    @Test
+    void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsInfinite() {
+        //given
+        MongoCollection<Document> collectionWithTimeout = mock(MongoCollection.class);
+        MongoCollection<Document> collection = mock(MongoCollection.class, mongoCollection -> {
+            when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout);
+        });
+
+        //when
+        MongoCollection<Document> result = collectionWithTimeout(collection, Timeout.infinite());
+        MongoCollection<Document> monoResult = collectionWithTimeoutMono(collection, Timeout.infinite()).block();
+        MongoCollection<Document> monoResultDeferred = collectionWithTimeoutDeferred(collection, Timeout.infinite()).block();
+
+        //then
+        assertEquals(collectionWithTimeout, result);
+        assertEquals(collectionWithTimeout, monoResult);
+        assertEquals(collectionWithTimeout, monoResultDeferred);
+        verify(collection, times(3))
+                .withTimeout(0L, TimeUnit.MILLISECONDS);
+    }
+
+    @Test
+    void shouldNotSetRemainingTimeoutOnDatabaseWhenTimeoutIsInfinite() {
+        //given
+        MongoDatabase databaseWithTimeout = mock(MongoDatabase.class);
+        MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> {
+            when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout);
+        });
+
+        //when
+        MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite());
+        MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()).block();
+
+        //then
+        assertEquals(databaseWithTimeout, result);
+        assertEquals(databaseWithTimeout, monoResultDeferred);
+        verify(database, times(2))
+                .withTimeout(0L, TimeUnit.MILLISECONDS);
+    }
+
+    @Test
+    void shouldSetRemainingTimeoutOnCollectionWhenTimeout() {
+        //given
+        MongoCollection<Document> collectionWithTimeout = mock(MongoCollection.class);
+        MongoCollection<Document> collection = mock(MongoCollection.class, mongoCollection -> {
+            when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout);
+        });
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoCollection<Document> result = collectionWithTimeout(collection, timeout);
+        MongoCollection<Document> monoResult = collectionWithTimeoutMono(collection, timeout).block();
+        MongoCollection<Document> monoResultDeferred = collectionWithTimeoutDeferred(collection, timeout).block();
+
+        //then
+        verify(collection, times(3))
+                .withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS));
+        assertEquals(collectionWithTimeout, result);
+        assertEquals(collectionWithTimeout, monoResult);
+        assertEquals(collectionWithTimeout, monoResultDeferred);
+    }
+
+    @Test
+    void shouldSetRemainingTimeoutOnDatabaseWhenTimeout() {
+        //given
+        MongoDatabase databaseWithTimeout = mock(MongoDatabase.class);
+        MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> {
+            when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout);
+        });
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout);
+        MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout).block();
+
+        //then
+        verify(database, times(2))
+                .withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS));
+        assertEquals(databaseWithTimeout, result);
+        assertEquals(databaseWithTimeout, monoResultDeferred);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredOnCollection() {
+        //given
+        MongoCollection<Document> collection = mock(MongoCollection.class);
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoOperationTimeoutException mongoExecutionTimeoutException =
+                assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, timeout));
+        MongoOperationTimeoutException mongoExecutionTimeoutExceptionMono =
+                assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutMono(collection, timeout).block());
+        MongoOperationTimeoutException mongoExecutionTimeoutExceptionDeferred =
+                assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutDeferred(collection, timeout).block());
+
+        //then
+        assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionMono.getMessage());
+        assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage());
+        assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionDeferred.getMessage());
+        verifyNoInteractions(collection);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredOnDatabase() {
+        //given
+        MongoDatabase database = mock(MongoDatabase.class);
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoOperationTimeoutException mongoExecutionTimeoutException =
+                assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout));
+        MongoOperationTimeoutException mongoExecutionTimeoutExceptionDeferred =
+                assertThrows(MongoOperationTimeoutException.class,
+                        () -> databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout)
+                                .block());
+
+        //then
+        assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage());
+        assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionDeferred.getMessage());
+        verifyNoInteractions(database);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnCollection() {
+        //given
+        MongoCollection<Document> collection = mock(MongoCollection.class);
+        Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, timeout));
+        assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutMono(collection, timeout).block());
+        assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutDeferred(collection, timeout).block());
+
+        //then
+        verifyNoInteractions(collection);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnDatabase() {
+        //given
+        MongoDatabase database = mock(MongoDatabase.class);
+        Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout));
+        assertThrows(MongoOperationTimeoutException.class,
+                () -> databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout).block());
+
+        //then
+        verifyNoInteractions(database);
+    }
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java
new file mode 100644
index 00000000000..38d19647fd7
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal.gridfs;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.Fixture;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.event.CommandEvent;
+import com.mongodb.internal.connection.TestCommandListener;
+import com.mongodb.reactivestreams.client.MongoClient;
+import com.mongodb.reactivestreams.client.MongoClients;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import com.mongodb.reactivestreams.client.TestEventPublisher;
+import com.mongodb.reactivestreams.client.TestSubscriber;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.reactivestreams.Subscription;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+
+class GridFSUploadPublisherImplTest {
+    private static final String GRID_FS_BUCKET_NAME = "db.fs";
+    private TestCommandListener commandListener;
+
+    protected MongoClientSettings.Builder getMongoClientSettingsBuilder() {
+        commandListener.reset();
+        return Fixture.getMongoClientSettingsBuilder()
+                .readConcern(ReadConcern.MAJORITY)
+                .writeConcern(WriteConcern.MAJORITY)
+                .readPreference(ReadPreference.primary())
+                .addCommandListener(commandListener);
+    }
+
+    @Test
+    void shouldTimeoutWhenSourcePublisherCompletionExceedsOverallOperationTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        //given
+        try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder()
+                .timeout(rtt + 800, TimeUnit.MILLISECONDS).build())) {
+            MongoDatabase database = client.getDatabase(getDefaultDatabaseName());
+            GridFSBucket gridFsBucket = GridFSBuckets.create(database, GRID_FS_BUCKET_NAME);
+
+            TestEventPublisher<ByteBuffer> eventPublisher = new TestEventPublisher<>();
+            TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>();
+
+            //when
+            gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream())
+                    .subscribe(testSubscriber);
+            testSubscriber.requestMore(1);
+
+            //then
+            testSubscriber.assertTerminalEvent();
+
+            List<Throwable> onErrorEvents = testSubscriber.getOnErrorEvents();
+            assertEquals(1, onErrorEvents.size());
+
+            Throwable throwable = onErrorEvents.get(0);
+            assertEquals(MongoOperationTimeoutException.class, throwable.getClass());
+            assertEquals("GridFS waiting for data from the source Publisher exceeded the timeout limit.", throwable.getMessage());
+
+            //assert no chunk has been inserted as we have not sent any data from source publisher.
+            for (CommandEvent event : commandListener.getEvents()) {
+                assertNotEquals("insert", event.getCommandName());
+            }
+        }
+    }
+
+    @Test
+    void shouldCancelSubscriptionToSourceWhenOperationTimeoutOccurs() throws Exception {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        //given
+        try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder()
+                .timeout(rtt + 1000, TimeUnit.MILLISECONDS).build())) {
+            MongoDatabase database = client.getDatabase(getDefaultDatabaseName());
+            GridFSBucket gridFsBucket = GridFSBuckets.create(database, GRID_FS_BUCKET_NAME);
+
+            TestEventPublisher<ByteBuffer> testEventPublisher = new TestEventPublisher<>();
+            CompletableFuture<Subscription> subscriptionSignal = new CompletableFuture<>();
+            Flux<ByteBuffer> eventStream = testEventPublisher.getEventStream().doOnSubscribe(subscriptionSignal::complete);
+            TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>();
+
+            //when
+            gridFsBucket.uploadFromPublisher("filename", eventStream)
+                    .subscribe(testSubscriber);
+            testSubscriber.requestMore(1);
+
+            //then
+            subscriptionSignal.get(TIMEOUT_DURATION.toMillis(), TimeUnit.MILLISECONDS);
+            assertEquals(1, testEventPublisher.currentSubscriberCount());
+            //We wait for timeout to occur here
+            testSubscriber.assertTerminalEvent();
+            assertEquals(0, testEventPublisher.currentSubscriberCount());
+        }
+    }
+
+    @BeforeEach
+    public void setUp() {
+        commandListener = new TestCommandListener();
+    }
+
+    @AfterEach
+    public void tearDown() {
+        CollectionHelper.dropDatabase(getDefaultDatabaseName());
+    }
+}
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala b/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala
index 93ab4bca823..192cf1ee912 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/ClientSideEncryptionTest.scala
@@ -39,7 +39,8 @@ class ClientSideEncryptionTest(
     mongoClient.getDatabase(databaseName)
 
   @After
-  def cleanUp(): Unit = {
+  override def cleanUp(): Unit = {
+    super.cleanUp()
     if (mongoClient != null) mongoClient.close()
   }
 }
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala
index 35c6d88defb..d9cec1ede39 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala
@@ -17,6 +17,7 @@ package org.mongodb.scala.syncadapter
 
 import com.mongodb.ExplainVerbosity
 import com.mongodb.client.AggregateIterable
+import org.mongodb.scala.TimeoutMode
 import com.mongodb.client.model.Collation
 import org.bson.conversions.Bson
 import org.bson.{ BsonValue, Document }
@@ -42,6 +43,11 @@ case class SyncAggregateIterable[T](wrapped: AggregateObservable[T])
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): AggregateIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def maxTime(maxTime: Long, timeUnit: TimeUnit): AggregateIterable[T] = {
     wrapped.maxTime(maxTime, timeUnit)
     this
@@ -102,5 +108,4 @@ case class SyncAggregateIterable[T](wrapped: AggregateObservable[T])
       .explain[E](verbosity)(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass))
       .toFuture()
       .get()
-
 }
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala
index 47687911bad..a517d027cd2 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala
@@ -16,12 +16,13 @@
 
 package org.mongodb.scala.syncadapter
 
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.Collation
 import com.mongodb.client.model.changestream.{ ChangeStreamDocument, FullDocument, FullDocumentBeforeChange }
 import com.mongodb.client.{ ChangeStreamIterable, MongoChangeStreamCursor }
 import com.mongodb.{ ServerAddress, ServerCursor }
 import org.bson.{ BsonDocument, BsonTimestamp, BsonValue }
-import org.mongodb.scala.ChangeStreamObservable
+import org.mongodb.scala.{ ChangeStreamObservable, TimeoutMode }
 
 import java.util.concurrent.TimeUnit
 
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala
index 38a9618a281..2866ce7427d 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala
@@ -18,6 +18,7 @@ package org.mongodb.scala.syncadapter
 
 import com.mongodb.{ ClientSessionOptions, MongoInterruptedException, ServerAddress, TransactionOptions }
 import com.mongodb.client.{ ClientSession => JClientSession, TransactionBody }
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.session.ServerSession
 import org.bson.{ BsonDocument, BsonTimestamp }
 import org.mongodb.scala._
@@ -93,4 +94,6 @@ case class SyncClientSession(wrapped: ClientSession, originator: Object) extends
         throw new MongoInterruptedException(null, e)
     }
   }
+
+  override def getTimeoutContext: TimeoutContext = wrapped.getTimeoutContext
 }
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala
index 5f007071db3..b105ac0897c 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala
@@ -20,7 +20,7 @@ import com.mongodb.client.DistinctIterable
 import com.mongodb.client.model.Collation
 import org.bson.BsonValue
 import org.bson.conversions.Bson
-import org.mongodb.scala.DistinctObservable
+import org.mongodb.scala.{ DistinctObservable, TimeoutMode }
 
 import java.util.concurrent.TimeUnit
 
@@ -42,6 +42,11 @@ case class SyncDistinctIterable[T](wrapped: DistinctObservable[T])
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): DistinctIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def collation(collation: Collation): DistinctIterable[T] = {
     wrapped.collation(collation)
     this
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala
index e66f70913b6..505241ab39a 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala
@@ -21,9 +21,9 @@ import com.mongodb.client.model.Collation
 import com.mongodb.{ CursorType, ExplainVerbosity }
 import org.bson.Document
 import org.bson.conversions.Bson
-import org.mongodb.scala.FindObservable
 import org.mongodb.scala.bson.BsonValue
 import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.{ FindObservable, TimeoutMode }
 
 import java.util.concurrent.TimeUnit
 import scala.reflect.ClassTag
@@ -84,6 +84,11 @@ case class SyncFindIterable[T](wrapped: FindObservable[T]) extends SyncMongoIter
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): FindIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def collation(collation: Collation): FindIterable[T] = {
     wrapped.collation(collation)
     this
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala
index 08fac0c9bb3..aa121ae99cf 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala
@@ -17,8 +17,8 @@ package org.mongodb.scala.syncadapter
 
 import com.mongodb.client.ListCollectionsIterable
 import org.bson.conversions.Bson
-import org.mongodb.scala.ListCollectionsObservable
 import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.{ ListCollectionsObservable, TimeoutMode }
 
 import java.util.concurrent.TimeUnit
 
@@ -40,6 +40,11 @@ case class SyncListCollectionsIterable[T](wrapped: ListCollectionsObservable[T])
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): ListCollectionsIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def comment(comment: String): ListCollectionsIterable[T] = {
     wrapped.comment(comment)
     this
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala
index 0b5c82d1fc0..aa841c1be0a 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala
@@ -2,8 +2,8 @@ package org.mongodb.scala.syncadapter
 
 import com.mongodb.client.ListDatabasesIterable
 import org.bson.conversions.Bson
-import org.mongodb.scala.ListDatabasesObservable
 import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.{ ListDatabasesObservable, TimeoutMode }
 
 import java.util.concurrent.TimeUnit
 
@@ -20,6 +20,11 @@ case class SyncListDatabasesIterable[T](wrapped: ListDatabasesObservable[T])
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): ListDatabasesIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def filter(filter: Bson): ListDatabasesIterable[T] = {
     wrapped.filter(filter)
     this
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala
index 22194de53aa..86db80bc6e4 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala
@@ -17,8 +17,8 @@
 package org.mongodb.scala.syncadapter
 
 import com.mongodb.client.ListIndexesIterable
-import org.mongodb.scala.ListIndexesObservable
 import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.{ ListIndexesObservable, TimeoutMode }
 
 import java.util.concurrent.TimeUnit
 
@@ -35,6 +35,11 @@ case class SyncListIndexesIterable[T](wrapped: ListIndexesObservable[T])
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): ListIndexesIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def comment(comment: String): ListIndexesIterable[T] = {
     wrapped.comment(comment)
     this
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala
index 6fb7a6d2199..672b97aff9e 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala
@@ -20,8 +20,8 @@ import com.mongodb.ExplainVerbosity
 import com.mongodb.client.ListSearchIndexesIterable
 import com.mongodb.client.model.Collation
 import org.bson.{ BsonValue, Document }
-import org.mongodb.scala.ListSearchIndexesObservable
 import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.{ ListSearchIndexesObservable, TimeoutMode }
 
 import java.util.concurrent.TimeUnit
 import scala.reflect.ClassTag
@@ -45,6 +45,11 @@ case class SyncListSearchIndexesIterable[T](wrapped: ListSearchIndexesObservable
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def maxTime(maxTime: Long, timeUnit: TimeUnit): ListSearchIndexesIterable[T] = {
     wrapped.maxTime(maxTime, timeUnit)
     this
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala
index 6fce83ffa4b..73af2f6f62a 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala
@@ -16,12 +16,12 @@
 
 package org.mongodb.scala.syncadapter
 
-import java.util.concurrent.TimeUnit
-
 import com.mongodb.client.MapReduceIterable
 import com.mongodb.client.model.{ Collation, MapReduceAction }
 import org.bson.conversions.Bson
-import org.mongodb.scala.MapReduceObservable
+import org.mongodb.scala.{ MapReduceObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
 
 case class SyncMapReduceIterable[T](wrapped: MapReduceObservable[T])
     extends SyncMongoIterable[T]
@@ -88,6 +88,11 @@ case class SyncMapReduceIterable[T](wrapped: MapReduceObservable[T])
     this
   }
 
+  override def timeoutMode(timeoutMode: TimeoutMode): MapReduceIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   override def bypassDocumentValidation(bypassDocumentValidation: java.lang.Boolean): MapReduceIterable[T] = {
     wrapped.bypassDocumentValidation(bypassDocumentValidation)
     this
@@ -97,4 +102,5 @@ case class SyncMapReduceIterable[T](wrapped: MapReduceObservable[T])
     wrapped.collation(collation)
     this
   }
+
 }
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala
index 9bb1ec9d6d8..4daa6d94ef1 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala
@@ -11,84 +11,10 @@ import scala.collection.JavaConverters._
 import scala.concurrent.Await
 import scala.reflect.ClassTag
 
-case class SyncMongoClient(wrapped: MongoClient) extends JMongoClient {
-
-  override def getDatabase(databaseName: String): JMongoDatabase =
-    SyncMongoDatabase(wrapped.getDatabase(databaseName))
-
-  override def startSession: ClientSession =
-    SyncClientSession(Await.result(wrapped.startSession().head(), WAIT_DURATION), this)
-
-  override def startSession(options: ClientSessionOptions): ClientSession =
-    SyncClientSession(Await.result(wrapped.startSession(options).head(), WAIT_DURATION), this)
+case class SyncMongoClient(wrapped: MongoClient) extends SyncMongoCluster(wrapped) with JMongoClient {
 
   override def close(): Unit = wrapped.close()
 
-  override def listDatabaseNames = throw new UnsupportedOperationException
-
-  override def listDatabaseNames(clientSession: ClientSession) = throw new UnsupportedOperationException
-
-  override def listDatabases = new SyncListDatabasesIterable[Document](wrapped.listDatabases[Document]())
-
-  override def listDatabases(clientSession: ClientSession) = throw new UnsupportedOperationException
-
-  override def listDatabases[TResult](resultClass: Class[TResult]) =
-    new SyncListDatabasesIterable[TResult](
-      wrapped.listDatabases[TResult]()(
-        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
-        ClassTag(resultClass)
-      )
-    )
-
-  override def listDatabases[TResult](clientSession: ClientSession, resultClass: Class[TResult]) =
-    throw new UnsupportedOperationException
-
-  override def watch = new SyncChangeStreamIterable[Document](wrapped.watch[Document]())
-
-  override def watch[TResult](resultClass: Class[TResult]) =
-    new SyncChangeStreamIterable[TResult](
-      wrapped.watch[TResult]()(DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], ClassTag(resultClass))
-    )
-
-  override def watch(pipeline: java.util.List[_ <: Bson]) =
-    new SyncChangeStreamIterable[Document](wrapped.watch[Document](pipeline.asScala.toSeq))
-
-  override def watch[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) =
-    new SyncChangeStreamIterable[TResult](
-      wrapped.watch[TResult](pipeline.asScala.toSeq)(
-        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
-        ClassTag(resultClass)
-      )
-    )
-
-  override def watch(clientSession: ClientSession) =
-    new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession)))
-
-  override def watch[TResult](clientSession: ClientSession, resultClass: Class[TResult]) =
-    new SyncChangeStreamIterable[TResult](
-      wrapped.watch(unwrap(clientSession))(
-        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
-        ClassTag(resultClass)
-      )
-    )
-
-  override def watch(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]) =
-    new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession), pipeline.asScala.toSeq))
-
-  override def watch[TResult](
-      clientSession: ClientSession,
-      pipeline: java.util.List[_ <: Bson],
-      resultClass: Class[TResult]
-  ) =
-    new SyncChangeStreamIterable[TResult](
-      wrapped.watch[TResult](unwrap(clientSession), pipeline.asScala.toSeq)(
-        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
-        ClassTag(resultClass)
-      )
-    )
-
   override def getClusterDescription = throw new UnsupportedOperationException
 
-  private def unwrap(clientSession: ClientSession): org.mongodb.scala.ClientSession =
-    clientSession.asInstanceOf[SyncClientSession].wrapped
 }
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala
new file mode 100644
index 00000000000..3871aded144
--- /dev/null
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala
@@ -0,0 +1,126 @@
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.{ ClientSessionOptions, ReadConcern, ReadPreference, WriteConcern }
+import com.mongodb.client.{ ClientSession, MongoCluster => JMongoCluster, MongoDatabase => JMongoDatabase }
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+import org.mongodb.scala.MongoCluster
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+
+import java.util.concurrent.TimeUnit
+import scala.collection.JavaConverters._
+import scala.concurrent.Await
+import scala.concurrent.duration.Duration
+import scala.reflect.ClassTag
+
+object SyncMongoCluster {
+
+  def apply(wrapped: MongoCluster): SyncMongoCluster = new SyncMongoCluster(wrapped)
+}
+
+class SyncMongoCluster(wrapped: MongoCluster) extends JMongoCluster {
+
+  override def getCodecRegistry: CodecRegistry = wrapped.codecRegistry
+
+  override def getReadPreference: ReadPreference = wrapped.readPreference
+
+  override def getWriteConcern: WriteConcern = wrapped.writeConcern
+
+  override def getReadConcern: ReadConcern = wrapped.readConcern
+
+  override def getTimeout(timeUnit: TimeUnit): java.lang.Long = {
+    val timeout = wrapped.timeout.map(d => timeUnit.convert(d.toMillis, TimeUnit.MILLISECONDS))
+    if (timeout.isDefined) timeout.get else null
+  }
+
+  override def withCodecRegistry(codecRegistry: CodecRegistry): JMongoCluster =
+    SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry))
+
+  override def withReadPreference(readPreference: ReadPreference): JMongoCluster =
+    SyncMongoCluster(wrapped.withReadPreference(readPreference))
+
+  override def withWriteConcern(writeConcern: WriteConcern): JMongoCluster =
+    SyncMongoCluster(wrapped.withWriteConcern(writeConcern))
+
+  override def withReadConcern(readConcern: ReadConcern): JMongoCluster =
+    SyncMongoCluster(wrapped.withReadConcern(readConcern))
+
+  override def withTimeout(timeout: Long, timeUnit: TimeUnit): JMongoCluster =
+    SyncMongoCluster(wrapped.withTimeout(Duration(timeout, timeUnit)))
+
+  override def getDatabase(databaseName: String): JMongoDatabase =
+    SyncMongoDatabase(wrapped.getDatabase(databaseName))
+
+  override def startSession: ClientSession =
+    SyncClientSession(Await.result(wrapped.startSession().head(), WAIT_DURATION), this)
+
+  override def startSession(options: ClientSessionOptions): ClientSession =
+    SyncClientSession(Await.result(wrapped.startSession(options).head(), WAIT_DURATION), this)
+
+  override def listDatabaseNames = throw new UnsupportedOperationException
+
+  override def listDatabaseNames(clientSession: ClientSession) = throw new UnsupportedOperationException
+
+  override def listDatabases = new SyncListDatabasesIterable[Document](wrapped.listDatabases[Document]())
+
+  override def listDatabases(clientSession: ClientSession) = throw new UnsupportedOperationException
+
+  override def listDatabases[TResult](resultClass: Class[TResult]) =
+    new SyncListDatabasesIterable[TResult](
+      wrapped.listDatabases[TResult]()(
+        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
+        ClassTag(resultClass)
+      )
+    )
+
+  override def listDatabases[TResult](clientSession: ClientSession, resultClass: Class[TResult]) =
+    throw new UnsupportedOperationException
+
+  override def watch = new SyncChangeStreamIterable[Document](wrapped.watch[Document]())
+
+  override def watch[TResult](resultClass: Class[TResult]) =
+    new SyncChangeStreamIterable[TResult](
+      wrapped.watch[TResult]()(DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], ClassTag(resultClass))
+    )
+
+  override def watch(pipeline: java.util.List[_ <: Bson]) =
+    new SyncChangeStreamIterable[Document](wrapped.watch[Document](pipeline.asScala.toSeq))
+
+  override def watch[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) =
+    new SyncChangeStreamIterable[TResult](
+      wrapped.watch[TResult](pipeline.asScala.toSeq)(
+        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
+        ClassTag(resultClass)
+      )
+    )
+
+  override def watch(clientSession: ClientSession) =
+    new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession)))
+
+  override def watch[TResult](clientSession: ClientSession, resultClass: Class[TResult]) =
+    new SyncChangeStreamIterable[TResult](
+      wrapped.watch(unwrap(clientSession))(
+        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
+        ClassTag(resultClass)
+      )
+    )
+
+  override def watch(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]) =
+    new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession), pipeline.asScala.toSeq))
+
+  override def watch[TResult](
+      clientSession: ClientSession,
+      pipeline: java.util.List[_ <: Bson],
+      resultClass: Class[TResult]
+  ) =
+    new SyncChangeStreamIterable[TResult](
+      wrapped.watch[TResult](unwrap(clientSession), pipeline.asScala.toSeq)(
+        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
+        ClassTag(resultClass)
+      )
+    )
+
+  private def unwrap(clientSession: ClientSession): org.mongodb.scala.ClientSession =
+    clientSession.asInstanceOf[SyncClientSession].wrapped
+}
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala
index 380c6d272f3..7d97d794c42 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala
@@ -34,7 +34,9 @@ import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
 import org.mongodb.scala.result.{ InsertManyResult, InsertOneResult }
 
 import java.util
+import java.util.concurrent.TimeUnit
 import scala.collection.JavaConverters._
+import scala.concurrent.duration.{ Duration, MILLISECONDS }
 import scala.reflect.ClassTag
 
 case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCollection[T] {
@@ -53,6 +55,13 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol
 
   override def getReadConcern: ReadConcern = wrapped.readConcern
 
+  override def getTimeout(timeUnit: TimeUnit): java.lang.Long = {
+    wrapped.timeout match {
+      case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS)
+      case None        => null
+    }
+  }
+
   override def withDocumentClass[NewTDocument](clazz: Class[NewTDocument]): JMongoCollection[NewTDocument] =
     SyncMongoCollection[NewTDocument](
       wrapped.withDocumentClass[NewTDocument]()(
@@ -73,6 +82,9 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol
   override def withReadConcern(readConcern: ReadConcern): JMongoCollection[T] =
     SyncMongoCollection[T](wrapped.withReadConcern(readConcern))
 
+  override def withTimeout(timeout: Long, timeUnit: TimeUnit): JMongoCollection[T] =
+    SyncMongoCollection[T](wrapped.withTimeout(Duration(timeout, timeUnit)))
+
   override def countDocuments: Long = wrapped.countDocuments().toFuture().get()
 
   override def countDocuments(filter: Bson): Long = wrapped.countDocuments(filter).toFuture().get()
@@ -556,7 +568,7 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol
 
   override def createSearchIndex(definition: Bson): String = wrapped.createSearchIndex(definition).toFuture().get()
 
-  override def createSearchIndexes(searchIndexModels: util.List[SearchIndexModel]): util.List[String] =
+  override def createSearchIndexes(searchIndexModels: java.util.List[SearchIndexModel]): java.util.List[String] =
     wrapped.createSearchIndexes(searchIndexModels.asScala.toList).toFuture().get().asJava
 
   override def updateSearchIndex(indexName: String, definition: Bson): Unit =
diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala
index 036d5589957..548289fd938 100644
--- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala
+++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala
@@ -24,7 +24,9 @@ import org.bson.conversions.Bson
 import org.mongodb.scala.MongoDatabase
 import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
 
+import java.util.concurrent.TimeUnit
 import scala.collection.JavaConverters._
+import scala.concurrent.duration.MILLISECONDS
 import scala.reflect.ClassTag
 
 case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase {
@@ -39,6 +41,13 @@ case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase {
 
   override def getReadConcern: ReadConcern = wrapped.readConcern
 
+  override def getTimeout(timeUnit: TimeUnit): java.lang.Long = {
+    wrapped.timeout match {
+      case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS)
+      case None        => null
+    }
+  }
+
   override def withCodecRegistry(codecRegistry: CodecRegistry) =
     SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry))
 
@@ -48,6 +57,8 @@ case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase {
 
   override def withReadConcern(readConcern: ReadConcern) = throw new UnsupportedOperationException
 
+  override def withTimeout(timeout: Long, timeUnit: TimeUnit) = throw new UnsupportedOperationException
+
   override def getCollection(collectionName: String) =
     SyncMongoCollection[Document](wrapped.getCollection(collectionName))
 
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala
index 20d5db9fd64..1a360c1a7c1 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala
@@ -17,6 +17,7 @@
 package org.mongodb.scala
 
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.{ Alpha, Reason }
 
 import java.util.concurrent.TimeUnit
 import com.mongodb.reactivestreams.client.AggregatePublisher
@@ -198,6 +199,28 @@ case class AggregateObservable[TResult](private val wrapped: AggregatePublisher[
    */
   def toCollection(): SingleObservable[Unit] = wrapped.toCollection()
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * If the `timeout` is set then:
+   *
+   * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME`
+   * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it's an error
+   *   to configure it as: `TimeoutMode.CURSOR_LIFETIME`
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): AggregateObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Helper to return a single observable limited to the first result.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala
index b803ad54a1c..4a50d7767e1 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala
@@ -16,6 +16,8 @@
 
 package org.mongodb.scala
 
+import com.mongodb.annotations.{ Alpha, Reason }
+
 import java.util.concurrent.TimeUnit
 import com.mongodb.reactivestreams.client.DistinctPublisher
 import org.mongodb.scala.bson.BsonValue
@@ -109,6 +111,22 @@ case class DistinctObservable[TResult](private val wrapped: DistinctPublisher[TR
     this
   }
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): DistinctObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Helper to return a single observable limited to the first result.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala
index 575ca66e8c8..c7cb7a158ae 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala
@@ -16,6 +16,7 @@
 
 package org.mongodb.scala
 
+import com.mongodb.annotations.{ Alpha, Reason }
 import com.mongodb.reactivestreams.client.FindPublisher
 import com.mongodb.{ CursorType, ExplainVerbosity }
 import org.mongodb.scala.bson.BsonValue
@@ -332,6 +333,28 @@ case class FindObservable[TResult](private val wrapped: FindPublisher[TResult])
     this
   }
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * If the `timeout` is set then:
+   *
+   * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME`
+   * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it's an error
+   * to configure it as: `TimeoutMode.CURSOR_LIFETIME`
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): FindObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Explain the execution plan for this operation with the server's default verbosity level
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala
index 65b5b61a5d4..c73fbb7118e 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala
@@ -16,6 +16,8 @@
 
 package org.mongodb.scala
 
+import com.mongodb.annotations.{ Alpha, Reason }
+
 import java.util.concurrent.TimeUnit
 import com.mongodb.reactivestreams.client.ListCollectionsPublisher
 import org.mongodb.scala.bson.BsonValue
@@ -94,6 +96,22 @@ case class ListCollectionsObservable[TResult](wrapped: ListCollectionsPublisher[
     this
   }
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): ListCollectionsObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Helper to return a single observable limited to the first result.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala
index 1d389eb476e..0b5d5bf2f93 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala
@@ -16,6 +16,8 @@
 
 package org.mongodb.scala
 
+import com.mongodb.annotations.{ Alpha, Reason }
+
 import java.util.concurrent.TimeUnit
 import com.mongodb.reactivestreams.client.ListDatabasesPublisher
 import org.mongodb.scala.bson.BsonValue
@@ -123,6 +125,22 @@ case class ListDatabasesObservable[TResult](wrapped: ListDatabasesPublisher[TRes
     this
   }
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): ListDatabasesObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Helper to return a single observable limited to the first result.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala
index 8de986edde0..fa8e3d1b24d 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala
@@ -16,6 +16,8 @@
 
 package org.mongodb.scala
 
+import com.mongodb.annotations.{ Alpha, Reason }
+
 import java.util.concurrent.TimeUnit
 import com.mongodb.reactivestreams.client.ListIndexesPublisher
 import org.mongodb.scala.bson.BsonValue
@@ -81,6 +83,22 @@ case class ListIndexesObservable[TResult](wrapped: ListIndexesPublisher[TResult]
     this
   }
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): ListIndexesObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Helper to return a single observable limited to the first result.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala
index 16b471a21e3..3987e830732 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala
@@ -17,6 +17,7 @@
 package org.mongodb.scala
 
 import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.{ Alpha, Reason }
 import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher
 import org.mongodb.scala.bson.BsonValue
 import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
@@ -122,6 +123,28 @@ case class ListSearchIndexesObservable[TResult](wrapped: ListSearchIndexesPublis
     this
   }
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * If the `timeout` is set then:
+   *
+   * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME`
+   * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it's an error
+   * to configure it as: `TimeoutMode.CURSOR_LIFETIME`
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Helper to return a single observable limited to the first result.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala
index 9e6ed2b2158..0ccabdaea62 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala
@@ -16,8 +16,9 @@
 
 package org.mongodb.scala
 
-import java.util.concurrent.TimeUnit
+import com.mongodb.annotations.{ Alpha, Reason }
 
+import java.util.concurrent.TimeUnit
 import com.mongodb.client.model.MapReduceAction
 import com.mongodb.reactivestreams.client.MapReducePublisher
 import org.mongodb.scala.bson.conversions.Bson
@@ -221,6 +222,22 @@ case class MapReduceObservable[TResult](wrapped: MapReducePublisher[TResult]) ex
    */
   def toCollection(): SingleObservable[Unit] = wrapped.toCollection()
 
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[com.mongodb.MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): MapReduceObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
   /**
    * Helper to return a single observable limited to the first result.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala
index c370077a7d2..c6849c550c1 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala
@@ -16,18 +16,13 @@
 
 package org.mongodb.scala
 
-import java.io.Closeable
-
 import com.mongodb.connection.ClusterDescription
 import com.mongodb.reactivestreams.client.{ MongoClient => JMongoClient, MongoClients }
 import org.bson.codecs.configuration.CodecRegistries.{ fromProviders, fromRegistries }
 import org.bson.codecs.configuration.CodecRegistry
-import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
 import org.mongodb.scala.bson.codecs.{ DocumentCodecProvider, IterableCodecProvider }
-import org.mongodb.scala.bson.conversions.Bson
 
-import scala.collection.JavaConverters._
-import scala.reflect.ClassTag
+import java.io.Closeable
 
 /**
  * Companion object for creating new [[MongoClient]] instances
@@ -116,36 +111,7 @@ object MongoClient {
  * @param wrapped the underlying java MongoClient
  * @since 1.0
  */
-case class MongoClient(private val wrapped: JMongoClient) extends Closeable {
-
-  /**
-   * Creates a client session.
-   *
-   * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations.
-   *
-   * @since 2.4
-   * @note Requires MongoDB 3.6 or greater
-   */
-  def startSession(): SingleObservable[ClientSession] = wrapped.startSession()
-
-  /**
-   * Creates a client session.
-   *
-   * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations.
-   *
-   * @param options  the options for the client session
-   * @since 2.2
-   * @note Requires MongoDB 3.6 or greater
-   */
-  def startSession(options: ClientSessionOptions): SingleObservable[ClientSession] = wrapped.startSession(options)
-
-  /**
-   * Gets the database with the given name.
-   *
-   * @param name the name of the database
-   * @return the database
-   */
-  def getDatabase(name: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(name))
+case class MongoClient(private val wrapped: JMongoClient) extends MongoCluster(wrapped) with Closeable {
 
   /**
    * Close the client, which will close all underlying cached resources, including, for example,
@@ -153,118 +119,15 @@ case class MongoClient(private val wrapped: JMongoClient) extends Closeable {
    */
   def close(): Unit = wrapped.close()
 
-  /**
-   * Get a list of the database names
-   *
-   * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]]
-   * @return an iterable containing all the names of all the databases
-   */
-  def listDatabaseNames(): Observable[String] = wrapped.listDatabaseNames()
-
-  /**
-   * Get a list of the database names
-   *
-   * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]]
-   *
-   * @param clientSession the client session with which to associate this operation
-   * @return an iterable containing all the names of all the databases
-   * @since 2.2
-   * @note Requires MongoDB 3.6 or greater
-   */
-  def listDatabaseNames(clientSession: ClientSession): Observable[String] = wrapped.listDatabaseNames(clientSession)
-
-  /**
-   * Gets the list of databases
-   *
-   * @tparam TResult   the type of the class to use instead of `Document`.
-   * @return the fluent list databases interface
-   */
-  def listDatabases[TResult]()(
-      implicit e: TResult DefaultsTo Document,
-      ct: ClassTag[TResult]
-  ): ListDatabasesObservable[TResult] =
-    ListDatabasesObservable(wrapped.listDatabases(ct))
-
-  /**
-   * Gets the list of databases
-   *
-   * @param clientSession the client session with which to associate this operation
-   * @tparam TResult the type of the class to use instead of `Document`.
-   * @return the fluent list databases interface
-   * @since 2.2
-   * @note Requires MongoDB 3.6 or greater
-   */
-  def listDatabases[TResult](
-      clientSession: ClientSession
-  )(implicit e: TResult DefaultsTo Document, ct: ClassTag[TResult]): ListDatabasesObservable[TResult] =
-    ListDatabasesObservable(wrapped.listDatabases(clientSession, ct))
-
-  /**
-   * Creates a change stream for this collection.
-   *
-   * @tparam C   the target document type of the observable.
-   * @return the change stream observable
-   * @since 2.4
-   * @note Requires MongoDB 4.0 or greater
-   */
-  def watch[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
-    ChangeStreamObservable(wrapped.watch(ct))
-
-  /**
-   * Creates a change stream for this collection.
-   *
-   * @param pipeline the aggregation pipeline to apply to the change stream
-   * @tparam C   the target document type of the observable.
-   * @return the change stream observable
-   * @since 2.4
-   * @note Requires MongoDB 4.0 or greater
-   */
-  def watch[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
-    ChangeStreamObservable(wrapped.watch(pipeline.asJava, ct))
-
-  /**
-   * Creates a change stream for this collection.
-   *
-   * @param clientSession the client session with which to associate this operation
-   * @tparam C   the target document type of the observable.
-   * @return the change stream observable
-   * @since 2.4
-   * @note Requires MongoDB 4.0 or greater
-   */
-  def watch[C](
-      clientSession: ClientSession
-  )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
-    ChangeStreamObservable(wrapped.watch(clientSession, ct))
-
-  /**
-   * Creates a change stream for this collection.
-   *
-   * @param clientSession the client session with which to associate this operation
-   * @param pipeline the aggregation pipeline to apply to the change stream
-   * @tparam C   the target document type of the observable.
-   * @return the change stream observable
-   * @since 2.4
-   * @note Requires MongoDB 4.0 or greater
-   */
-  def watch[C](
-      clientSession: ClientSession,
-      pipeline: Seq[Bson]
-  )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
-    ChangeStreamObservable(wrapped.watch(clientSession, pipeline.asJava, ct))
-
   /**
    * Gets the current cluster description.
    *
-   * <p>
-   * This method will not block, meaning that it may return a { @link ClusterDescription} whose { @code clusterType} is unknown
+   * This method will not block, meaning that it may return a `ClusterDescription` whose `clusterType` is unknown
    * and whose { @link com.mongodb.connection.ServerDescription}s are all in the connecting state.  If the application requires
-   * notifications after the driver has connected to a member of the cluster, it should register a { @link ClusterListener} via
-   * the { @link ClusterSettings} in { @link com.mongodb.MongoClientSettings}.
-   * </p>
+   * notifications after the driver has connected to a member of the cluster, it should register a `ClusterListener` via
+   * the `ClusterSettings` in `MongoClientSettings`.
    *
    * @return the current cluster description
-   * @see ClusterSettings.Builder#addClusterListener(ClusterListener)
-   * @see com.mongodb.MongoClientSettings.Builder#applyToClusterSettings(com.mongodb.Block)
    * @since 4.1
    */
   def getClusterDescription: ClusterDescription =
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala
new file mode 100644
index 00000000000..a7352d5ac41
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.annotations.{ Alpha, Reason }
+import com.mongodb.{ ReadConcern, ReadPreference, WriteConcern }
+import com.mongodb.reactivestreams.client.{ MongoCluster => JMongoCluster }
+import org.bson.codecs.configuration.CodecRegistry
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.bson.conversions.Bson
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration.{ Duration, MILLISECONDS }
+import scala.reflect.ClassTag
+
+/**
+ * Companion object for creating new [[MongoCluster]] instances
+ *
+ * @since 5.2
+ */
+object MongoCluster {
+
+  /**
+   * Create a new `MongoCluster` wrapper
+   *
+   * @param wrapped the java `MongoCluster` instance
+   * @return MongoCluster
+   */
+  def apply(wrapped: JMongoCluster): MongoCluster = new MongoCluster(wrapped)
+}
+
+/**
+ * The client-side representation of MongoDB cluster operations.
+ *
+ * The originating [[MongoClient]] is responsible for the closing of resources.
+ * If the originator [[MongoClient]] is closed, then any operations will fail.
+ *
+ * @see MongoClient
+ * @since 5.2
+ */
+class MongoCluster(private val wrapped: JMongoCluster) {
+
+  /**
+   * Get the codec registry for the MongoCluster.
+   *
+   * @return the { @link org.bson.codecs.configuration.CodecRegistry}
+   */
+  lazy val codecRegistry: CodecRegistry = wrapped.getCodecRegistry
+
+  /**
+   * Get the read preference for the MongoCluster.
+   *
+   * @return the { @link com.mongodb.ReadPreference}
+   */
+  lazy val readPreference: ReadPreference = wrapped.getReadPreference
+
+  /**
+   * Get the write concern for the MongoCluster.
+   *
+   * @return the { @link com.mongodb.WriteConcern}
+   */
+  lazy val writeConcern: WriteConcern = wrapped.getWriteConcern
+
+  /**
+   * Get the read concern for the MongoCluster.
+   *
+   * @return the [[ReadConcern]]
+   */
+  lazy val readConcern: ReadConcern = wrapped.getReadConcern
+
+  /**
+   * The time limit for the full execution of an operation.
+   *
+   * If not null, the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+   * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+   *
+   *   - `null` means that the timeout mechanism for operations will defer to using:
+   *      - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available
+   *      - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+   *      - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+   *      - `maxTimeMS`: The time limit for processing operations on a cursor.
+   *        See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+   *      - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+   *   - `0` means infinite timeout.
+   *   - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @return the optional timeout duration
+   */
+  @Alpha(Array(Reason.CLIENT))
+  lazy val timeout: Option[Duration] =
+    Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS))
+
+  /**
+   * Create a new MongoCluster instance with a different codec registry.
+   *
+   * The { @link CodecRegistry} configured by this method is effectively treated by the driver as an
+   * instance of { @link CodecProvider}, which { @link CodecRegistry} extends.
+   * So there is no benefit to defining a class that implements { @link CodecRegistry}. Rather, an
+   * application should always create { @link CodecRegistry} instances using the factory methods in
+   * { @link CodecRegistries}.
+   *
+   * @param codecRegistry the new { @link org.bson.codecs.configuration.CodecRegistry} for the cluster
+   * @return a new MongoCluster instance with the different codec registry
+   * @see CodecRegistries
+   */
+  def withCodecRegistry(codecRegistry: CodecRegistry): MongoCluster =
+    MongoCluster(wrapped.withCodecRegistry(codecRegistry))
+
+  /**
+   * Create a new MongoCluster instance with a different read preference.
+   *
+   * @param readPreference the new { @link com.mongodb.ReadPreference} for the cluster
+   * @return a new MongoCluster instance with the different readPreference
+   */
+  def withReadPreference(readPreference: ReadPreference): MongoCluster =
+    MongoCluster(wrapped.withReadPreference(readPreference))
+
+  /**
+   * Create a new MongoCluster instance with a different write concern.
+   *
+   * @param writeConcern the new { @link com.mongodb.WriteConcern} for the cluster
+   * @return a new MongoCluster instance with the different writeConcern
+   */
+  def withWriteConcern(writeConcern: WriteConcern): MongoCluster =
+    MongoCluster(wrapped.withWriteConcern(writeConcern))
+
+  /**
+   * Create a new MongoCluster instance with a different read concern.
+   *
+   * @param readConcern the new [[ReadConcern]] for the cluster
+   * @return a new MongoCluster instance with the different ReadConcern
+   * @since 1.1
+   */
+  def withReadConcern(readConcern: ReadConcern): MongoCluster =
+    MongoCluster(wrapped.withReadConcern(readConcern))
+
+  /**
+   * Create a new MongoCluster instance with the set time limit for the full execution of an operation.
+   *
+   * - `0` means infinite timeout.
+   * - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @param timeout the timeout, which must be greater than or equal to 0
+   * @return a new MongoCluster instance with the set time limit for operations
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def withTimeout(timeout: Duration): MongoCluster =
+    MongoCluster(wrapped.withTimeout(timeout.toMillis, MILLISECONDS))
+
+  /**
+   * Creates a client session.
+   *
+   * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations.
+   *
+   * @since 2.4
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def startSession(): SingleObservable[ClientSession] = wrapped.startSession()
+
+  /**
+   * Creates a client session.
+   *
+   * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations.
+   *
+   * @param options  the options for the client session
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def startSession(options: ClientSessionOptions): SingleObservable[ClientSession] = wrapped.startSession(options)
+
+  /**
+   * Gets the database with the given name.
+   *
+   * @param name the name of the database
+   * @return the database
+   */
+  def getDatabase(name: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(name))
+
+  /**
+   * Get a list of the database names
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]]
+   * @return an iterable containing all the names of all the databases
+   */
+  def listDatabaseNames(): Observable[String] = wrapped.listDatabaseNames()
+
+  /**
+   * Get a list of the database names
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]]
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @return an iterable containing all the names of all the databases
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def listDatabaseNames(clientSession: ClientSession): Observable[String] = wrapped.listDatabaseNames(clientSession)
+
+  /**
+   * Gets the list of databases
+   *
+   * @tparam TResult   the type of the class to use instead of `Document`.
+   * @return the fluent list databases interface
+   */
+  def listDatabases[TResult]()(
+      implicit e: TResult DefaultsTo Document,
+      ct: ClassTag[TResult]
+  ): ListDatabasesObservable[TResult] =
+    ListDatabasesObservable(wrapped.listDatabases(ct))
+
+  /**
+   * Gets the list of databases
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @tparam TResult the type of the class to use instead of `Document`.
+   * @return the fluent list databases interface
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def listDatabases[TResult](
+      clientSession: ClientSession
+  )(implicit e: TResult DefaultsTo Document, ct: ClassTag[TResult]): ListDatabasesObservable[TResult] =
+    ListDatabasesObservable(wrapped.listDatabases(clientSession, ct))
+
+  /**
+   * Creates a change stream for this collection.
+   *
+   * @tparam C   the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(ct))
+
+  /**
+   * Creates a change stream for this collection.
+   *
+   * @param pipeline the aggregation pipeline to apply to the change stream
+   * @tparam C   the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(pipeline.asJava, ct))
+
+  /**
+   * Creates a change stream for this collection.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @tparam C   the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C](
+      clientSession: ClientSession
+  )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(clientSession, ct))
+
+  /**
+   * Creates a change stream for this collection.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param pipeline the aggregation pipeline to apply to the change stream
+   * @tparam C   the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C](
+      clientSession: ClientSession,
+      pipeline: Seq[Bson]
+  )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(clientSession, pipeline.asJava, ct))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala
index e2682e0130d..bdd63f9245a 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala
@@ -16,6 +16,7 @@
 
 package org.mongodb.scala
 
+import com.mongodb.annotations.{ Alpha, Reason }
 import com.mongodb.client.model.DropCollectionOptions
 
 import java.util
@@ -27,6 +28,7 @@ import org.mongodb.scala.model._
 import org.mongodb.scala.result._
 
 import scala.collection.JavaConverters._
+import scala.concurrent.duration.{ Duration, MILLISECONDS, TimeUnit }
 import scala.reflect.ClassTag
 
 // scalastyle:off number.of.methods file.size.limit
@@ -83,6 +85,29 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul
    */
   lazy val readConcern: ReadConcern = wrapped.getReadConcern
 
+  /**
+   * The time limit for the full execution of an operation.
+   *
+   * If not null, the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+   * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+   *
+   *   - `null` means that the timeout mechanism for operations will defer to using:
+   *      - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available
+   *      - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+   *      - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+   *      - `maxTimeMS`: The time limit for processing operations on a cursor.
+   *        See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+   *      - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+   *   - `0` means infinite timeout.
+   *   - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @return the optional timeout duration
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  lazy val timeout: Option[Duration] =
+    Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS))
+
   /**
    * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into..
    *
@@ -136,6 +161,20 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul
   def withReadConcern(readConcern: ReadConcern): MongoCollection[TResult] =
     MongoCollection(wrapped.withReadConcern(readConcern))
 
+  /**
+   * Create a new MongoCollection instance with the set time limit for the full execution of an operation.
+   *
+   * - `0` means infinite timeout.
+   * - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @param timeout the timeout, which must be greater than or equal to 0
+   * @return a new MongoCollection instance with the set time limit for operations
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def withTimeout(timeout: Duration): MongoCollection[TResult] =
+    MongoCollection(wrapped.withTimeout(timeout.toMillis, MILLISECONDS))
+
   /**
    * Gets an estimate of the count of documents in a collection using collection metadata.
    *
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala
index 33ad891373c..54c48574c72 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala
@@ -16,6 +16,7 @@
 
 package org.mongodb.scala
 
+import com.mongodb.annotations.{ Alpha, Reason }
 import com.mongodb.client.model.{ CreateCollectionOptions, CreateViewOptions }
 import com.mongodb.reactivestreams.client.{ MongoDatabase => JMongoDatabase }
 import org.bson.codecs.configuration.CodecRegistry
@@ -23,6 +24,7 @@ import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
 import org.mongodb.scala.bson.conversions.Bson
 
 import scala.collection.JavaConverters._
+import scala.concurrent.duration.{ Duration, MILLISECONDS }
 import scala.reflect.ClassTag
 
 /**
@@ -69,6 +71,29 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) {
    */
   lazy val readConcern: ReadConcern = wrapped.getReadConcern
 
+  /**
+   * The time limit for the full execution of an operation.
+   *
+   * If not null, the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+   * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+   *
+   *   - `null` means that the timeout mechanism for operations will defer to using:
+   *      - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available
+   *      - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+   *      - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+   *      - `maxTimeMS`: The time limit for processing operations on a cursor.
+   *        See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+   *      - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+   *   - `0` means infinite timeout.
+   *   - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @return the optional timeout duration
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  lazy val timeout: Option[Duration] =
+    Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS))
+
   /**
    * Create a new MongoDatabase instance with a different codec registry.
    *
@@ -113,6 +138,20 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) {
   def withReadConcern(readConcern: ReadConcern): MongoDatabase =
     MongoDatabase(wrapped.withReadConcern(readConcern))
 
+  /**
+   * Create a new MongoDatabase instance with the set time limit for the full execution of an operation.
+   *
+   * - `0` means infinite timeout.
+   * - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @param timeout the timeout, which must be greater than or equal to 0
+   * @return a new MongoDatabase instance with the set time limit for operations
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def withTimeout(timeout: Duration): MongoDatabase =
+    MongoDatabase(wrapped.withTimeout(timeout.toMillis, MILLISECONDS))
+
   /**
    * Gets a collection, with a specific default document class.
    *
@@ -128,6 +167,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) {
   /**
    * Executes command in the context of the current database using the primary server.
    *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
    * @param command  the command to be run
    * @tparam TResult the type of the class to use instead of [[Document]].
    * @return a Observable containing the command result
@@ -140,6 +182,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) {
   /**
    * Executes command in the context of the current database.
    *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
    * @param command        the command to be run
    * @param readPreference the [[ReadPreference]] to be used when executing the command
    * @tparam TResult       the type of the class to use instead of [[Document]].
@@ -154,6 +199,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) {
   /**
    * Executes command in the context of the current database using the primary server.
    *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
    * @param clientSession the client session with which to associate this operation
    * @param command  the command to be run
    * @tparam TResult the type of the class to use instead of [[Document]].
@@ -170,6 +218,9 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) {
   /**
    * Executes command in the context of the current database.
    *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
    * @param command        the command to be run
    * @param readPreference the [[ReadPreference]] to be used when executing the command
    * @tparam TResult       the type of the class to use instead of [[Document]].
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala
index 88400883009..b828fe6074f 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala
@@ -16,8 +16,8 @@
 
 package org.mongodb.scala.gridfs
 
+import com.mongodb.annotations.{ Alpha, Reason }
 import java.nio.ByteBuffer
-
 import com.mongodb.reactivestreams.client.gridfs.{ GridFSBucket => JGridFSBucket, GridFSBuckets }
 import org.mongodb.scala.bson.conversions.Bson
 import org.mongodb.scala.bson.{ BsonObjectId, BsonValue, ObjectId }
@@ -31,6 +31,8 @@ import org.mongodb.scala.{
   WriteConcern
 }
 
+import scala.concurrent.duration.{ Duration, MILLISECONDS }
+
 /**
  * A factory for GridFSBucket instances.
  *
@@ -102,6 +104,29 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    */
   lazy val readConcern: ReadConcern = wrapped.getReadConcern
 
+  /**
+   * The time limit for the full execution of an operation.
+   *
+   * If not null, the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+   * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+   *
+   *   - `null` means that the timeout mechanism for operations will defer to using:
+   *      - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available
+   *      - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+   *      - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+   *      - `maxTimeMS`: The time limit for processing operations on a cursor.
+   *        See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+   *      - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+   *   - `0` means infinite timeout.
+   *   - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @return the optional timeout duration
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  lazy val timeout: Option[Duration] =
+    Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS))
+
   /**
    * Create a new GridFSBucket instance with a new chunk size in bytes.
    *
@@ -137,12 +162,29 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    */
   def withReadConcern(readConcern: ReadConcern): GridFSBucket = GridFSBucket(wrapped.withReadConcern(readConcern))
 
+  /**
+   * Create a new GridFSBucket instance with the set time limit for the full execution of an operation.
+   *
+   * - `0` means infinite timeout.
+   * - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @param timeout the timeout, which must be greater than or equal to 0
+   * @return a new GridFSBucket instance with the set time limit for operations
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def withTimeout(timeout: Duration): GridFSBucket =
+    GridFSBucket(wrapped.withTimeout(timeout.toMillis, MILLISECONDS))
+
   /**
    * Uploads the contents of the given `Observable` to a GridFS bucket.
    *
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
    *
    * @param filename the filename for the stream
    * @param source   the Publisher providing the file data
@@ -158,6 +200,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
    *
    * @param filename the filename for the stream
    * @param source   the Publisher providing the file data
@@ -178,6 +223,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
    *
    * @param id       the custom id value of the file
    * @param filename the filename for the stream
@@ -198,6 +246,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
    *
    * @param id       the custom id value of the file
    * @param filename the filename for the stream
@@ -220,6 +271,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
    *
    * @param clientSession the client session with which to associate this operation
    * @param filename      the filename for the stream
@@ -241,6 +295,10 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
+   *
    * @param clientSession the client session with which to associate this operation
    * @param filename      the filename for the stream
    * @param source        the Publisher providing the file data
@@ -263,6 +321,9 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
    *
    * @param clientSession the client session with which to associate this operation
    * @param id            the custom id value of the file
@@ -286,6 +347,10 @@ case class GridFSBucket(private val wrapped: JGridFSBucket) {
    * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the
    * chunks have been uploaded, it creates a files collection document for `filename` in the files collection.
    *
+   * Note: When this [[GridFSBucket]] is set with an operation timeout (via timeout inherited from [[MongoDatabase]]
+   * settings or [[GridFSBucket#withTimeout()]]), timeout breaches may occur due to the [[Observable]]
+   * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
+   *
    * @param clientSession the client session with which to associate this operation
    * @param id            the custom id value of the file
    * @param filename      the filename for the stream
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala
index 79d0a4a17b1..fdbea9add70 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala
@@ -17,10 +17,9 @@
 package org.mongodb.scala.gridfs
 
 import java.util.concurrent.TimeUnit
-
 import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher
 import org.mongodb.scala.bson.conversions.Bson
-import org.mongodb.scala.{ Observable, Observer, SingleObservable }
+import org.mongodb.scala.{ Observable, Observer, SingleObservable, TimeoutMode }
 
 import scala.concurrent.duration.Duration
 
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala
index fc3196f76f6..0fff8c4c8ba 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala
@@ -16,7 +16,7 @@
 
 package org.mongodb.scala.model
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.fill.FillOutputField
 import com.mongodb.client.model.search.FieldSearchPath
 
@@ -737,7 +737,7 @@ object Aggregates {
    * @note Requires MongoDB 6.0.10 or greater
    * @since 4.11
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   def vectorSearch(
       path: FieldSearchPath,
       queryVector: Iterable[java.lang.Double],
@@ -763,7 +763,7 @@ object Aggregates {
    * @note Requires MongoDB 6.0.10 or greater
    * @since 4.11
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   def vectorSearch(
       path: FieldSearchPath,
       queryVector: Iterable[java.lang.Double],
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala
index 4688fa818c6..5ccbd299edf 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.{ MongoTimeUnit => JMongoTimeUnit, Windows => JWindows }
 import org.bson.types.Decimal128
 import org.mongodb.scala.bson.conversions.Bson
@@ -56,7 +56,7 @@ import org.mongodb.scala.bson.conversions.Bson
  * @since 4.3
  * @note Requires MongoDB 5.0 or greater.
  */
-@Beta(Array(Beta.Reason.SERVER))
+@Beta(Array(Reason.SERVER))
 object Windows {
 
   /**
@@ -248,7 +248,7 @@ object Windows {
    * @since 4.3
    * @note Requires MongoDB 5.0 or greater.
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   object Bound {
 
     /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala
index a8dc63a2b29..111af0e6568 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala
@@ -16,7 +16,7 @@
 
 package org.mongodb.scala
 
-import com.mongodb.annotations.{ Beta, Sealed }
+import com.mongodb.annotations.{ Beta, Reason, Sealed }
 
 import scala.collection.JavaConverters._
 import com.mongodb.client.model.{ GeoNearOptions, MongoTimeUnit => JMongoTimeUnit, WindowOutputField }
@@ -173,7 +173,7 @@ package object model {
    *
    * @since 4.9
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   type CreateEncryptedCollectionParams = com.mongodb.client.model.CreateEncryptedCollectionParams
 
   /**
@@ -181,7 +181,7 @@ package object model {
    *
    * @since 4.9
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   object CreateEncryptedCollectionParams {
     def apply(kmsProvider: String) =
       new com.mongodb.client.model.CreateEncryptedCollectionParams(kmsProvider)
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala
index afeb5d195d8..d106d6bbd9d 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ FuzzySearchOptions => JFuzzySearchOptions }
 
 /**
@@ -25,7 +25,7 @@ import com.mongodb.client.model.search.{ FuzzySearchOptions => JFuzzySearchOptio
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object FuzzySearchOptions {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala
index d4fe9ccdffc..a651e502b10 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchCollector => JSearchCollector }
 import org.mongodb.scala.bson.conversions.Bson
 import org.mongodb.scala.model.Projections
@@ -30,7 +30,7 @@ import scala.collection.JavaConverters._
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object SearchCollector {
 
   /**
@@ -42,7 +42,7 @@ object SearchCollector {
    * @return The requested `SearchCollector`.
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/ facet collector]]
    */
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   def facet(operator: SearchOperator, facets: Iterable[_ <: SearchFacet]): FacetSearchCollector =
     JSearchCollector.facet(operator, facets.asJava)
 
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala
index 0df9a08ac51..ecba0ecce0d 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchCount => JSearchCount }
 import org.mongodb.scala.bson.conversions.Bson
 import org.mongodb.scala.model.Projections
@@ -28,7 +28,7 @@ import org.mongodb.scala.model.Projections
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/counting/ Counting]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+@Beta(Array(Reason.CLIENT, Reason.SERVER))
 object SearchCount {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala
index 4482c8bc678..3bc27520ea3 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchFacet => JSearchFacet }
 import org.mongodb.scala.bson.conversions.Bson
 
@@ -28,7 +28,7 @@ import collection.JavaConverters._
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#facet-definition Facet definition]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+@Beta(Array(Reason.CLIENT, Reason.SERVER))
 object SearchFacet {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala
index a46903a3147..7ac1deebac1 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchHighlight => JSearchHighlight }
 import org.mongodb.scala.bson.conversions.Bson
 import org.mongodb.scala.model.Projections
@@ -30,7 +30,7 @@ import collection.JavaConverters._
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/highlighting/ Highlighting]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object SearchHighlight {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala
index a1dc4caebff..90f27092ebc 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchOperator => JSearchOperator }
 import org.mongodb.scala.bson.conversions.Bson
 import org.mongodb.scala.model.geojson.Point
@@ -29,7 +29,7 @@ import collection.JavaConverters._
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object SearchOperator {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala
index 56069e8624d..5eb61591043 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchOptions => JSearchOptions }
 
 /**
@@ -24,7 +24,7 @@ import com.mongodb.client.model.search.{ SearchOptions => JSearchOptions }
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search syntax]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object SearchOptions {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala
index cfe85faa6f7..74999deef35 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchPath => JSearchPath }
 
 /**
@@ -27,7 +27,7 @@ import com.mongodb.client.model.search.{ SearchPath => JSearchPath }
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/path-construction/ Path]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object SearchPath {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala
index b43598220e3..35005c05970 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchScore => JSearchScore }
 import org.mongodb.scala.bson.conversions.Bson
 import org.mongodb.scala.model.Projections
@@ -28,7 +28,7 @@ import org.mongodb.scala.model.Projections
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object SearchScore {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala
index 22657bc874e..244c07e5847 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ SearchScoreExpression => JSearchScoreExpression }
 import org.mongodb.scala.bson.conversions.Bson
 
@@ -26,7 +26,7 @@ import collection.JavaConverters._
  * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#expressions Expressions for the function score modifier]]
  * @since 4.7
  */
-@Beta(Array(Beta.Reason.CLIENT))
+@Beta(Array(Reason.CLIENT))
 object SearchScoreExpression {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala
index e355a5558cc..ab25650ca7a 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model.search
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.search.{ VectorSearchOptions => JVectorSearchOptions }
 
 /**
@@ -25,7 +25,7 @@ import com.mongodb.client.model.search.{ VectorSearchOptions => JVectorSearchOpt
  * @note Requires MongoDB 6.0.10 or greater
  * @since 4.11
  */
-@Beta(Array(Beta.Reason.SERVER))
+@Beta(Array(Reason.SERVER))
 object VectorSearchOptions {
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala
index e3f3fb5e308..fb9e393dd1b 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala
@@ -15,7 +15,7 @@
  */
 package org.mongodb.scala.model
 
-import com.mongodb.annotations.{ Beta, Sealed }
+import com.mongodb.annotations.{ Beta, Reason, Sealed }
 
 /**
  * Query building API for MongoDB Atlas full-text search.
@@ -40,7 +40,7 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type SearchOperator = com.mongodb.client.model.search.SearchOperator
 
   /**
@@ -50,14 +50,14 @@ package object search {
    * @see `SearchOperator.compound()`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type CompoundSearchOperatorBase = com.mongodb.client.model.search.CompoundSearchOperatorBase
 
   /**
    * @see `SearchOperator.compound()`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type CompoundSearchOperator = com.mongodb.client.model.search.CompoundSearchOperator
 
   /**
@@ -68,7 +68,7 @@ package object search {
    * @see `CompoundSearchOperatorBase.must(Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type MustCompoundSearchOperator = com.mongodb.client.model.search.MustCompoundSearchOperator
 
   /**
@@ -79,7 +79,7 @@ package object search {
    * @see `CompoundSearchOperatorBase.mustNot(Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type MustNotCompoundSearchOperator = com.mongodb.client.model.search.MustNotCompoundSearchOperator
 
   /**
@@ -90,7 +90,7 @@ package object search {
    * @see `CompoundSearchOperatorBase.should(Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type ShouldCompoundSearchOperator = com.mongodb.client.model.search.ShouldCompoundSearchOperator
 
   /**
@@ -101,14 +101,14 @@ package object search {
    * @see `CompoundSearchOperatorBase.filter(Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type FilterCompoundSearchOperator = com.mongodb.client.model.search.FilterCompoundSearchOperator
 
   /**
    * @see `SearchOperator.exists(FieldSearchPath)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type ExistsSearchOperator = com.mongodb.client.model.search.ExistsSearchOperator
 
   /**
@@ -116,7 +116,7 @@ package object search {
    * @see `SearchOperator.text(Iterable, Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type TextSearchOperator = com.mongodb.client.model.search.TextSearchOperator
 
   /**
@@ -124,7 +124,7 @@ package object search {
    * @see `SearchOperator.autocomplete(Iterable, FieldSearchPath)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type AutocompleteSearchOperator = com.mongodb.client.model.search.AutocompleteSearchOperator
 
   /**
@@ -134,7 +134,7 @@ package object search {
    * @see `SearchOperator.numberRange`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type NumberRangeSearchOperatorBase = com.mongodb.client.model.search.NumberRangeSearchOperatorBase
 
   /**
@@ -144,42 +144,42 @@ package object search {
    * @see `SearchOperator.dateRange`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type DateRangeSearchOperatorBase = com.mongodb.client.model.search.DateRangeSearchOperatorBase
 
   /**
    * @see `SearchOperator.numberRange`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type NumberRangeSearchOperator = com.mongodb.client.model.search.NumberRangeSearchOperator
 
   /**
    * @see `SearchOperator.dateRange`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type DateRangeSearchOperator = com.mongodb.client.model.search.DateRangeSearchOperator
 
   /**
    * @see `SearchOperator.near`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type NumberNearSearchOperator = com.mongodb.client.model.search.NumberNearSearchOperator
 
   /**
    * @see `SearchOperator.near`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type DateNearSearchOperator = com.mongodb.client.model.search.DateNearSearchOperator
 
   /**
    * @see `SearchOperator.near`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type GeoNearSearchOperator = com.mongodb.client.model.search.GeoNearSearchOperator
 
   /**
@@ -189,7 +189,7 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type FuzzySearchOptions = com.mongodb.client.model.search.FuzzySearchOptions
 
   /**
@@ -200,14 +200,14 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type SearchCollector = com.mongodb.client.model.search.SearchCollector
 
   /**
    * @see `SearchCollector.facet(SearchOperator, Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type FacetSearchCollector = com.mongodb.client.model.search.FacetSearchCollector
 
   /**
@@ -216,7 +216,7 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search syntax]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type SearchOptions = com.mongodb.client.model.search.SearchOptions
 
   /**
@@ -227,7 +227,7 @@ package object search {
    * @since 4.11
    */
   @Sealed
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   type VectorSearchOptions = com.mongodb.client.model.search.VectorSearchOptions
 
   /**
@@ -238,7 +238,7 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/highlighting/ Highlighting]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type SearchHighlight = com.mongodb.client.model.search.SearchHighlight
 
   /**
@@ -250,21 +250,21 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/counting/ Counting]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type SearchCount = com.mongodb.client.model.search.SearchCount
 
   /**
    * @see `SearchCount.total()`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type TotalSearchCount = com.mongodb.client.model.search.TotalSearchCount
 
   /**
    * @see `SearchCount.lowerBound()`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type LowerBoundSearchCount = com.mongodb.client.model.search.LowerBoundSearchCount
 
   /**
@@ -273,28 +273,28 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#facet-definition Facet definition]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type SearchFacet = com.mongodb.client.model.search.SearchFacet
 
   /**
    * @see `SearchFacet.stringFacet(String, FieldSearchPath)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type StringSearchFacet = com.mongodb.client.model.search.StringSearchFacet
 
   /**
    * @see `SearchFacet.numberFacet(String, FieldSearchPath, Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type NumberSearchFacet = com.mongodb.client.model.search.NumberSearchFacet
 
   /**
    * @see `SearchFacet.dateFacet(String, FieldSearchPath, Iterable)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT, Beta.Reason.SERVER))
+  @Beta(Array(Reason.CLIENT, Reason.SERVER))
   type DateSearchFacet = com.mongodb.client.model.search.DateSearchFacet
 
   /**
@@ -306,21 +306,21 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/path-construction/ Path]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type SearchPath = com.mongodb.client.model.search.SearchPath
 
   /**
    * @see `SearchPath.fieldPath(String)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type FieldSearchPath = com.mongodb.client.model.search.FieldSearchPath
 
   /**
    * @see `SearchPath.wildcardPath(String)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type WildcardSearchPath = com.mongodb.client.model.search.WildcardSearchPath
 
   /**
@@ -331,35 +331,35 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type SearchScore = com.mongodb.client.model.search.SearchScore
 
   /**
    * @see `SearchScore.boost(float)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type ValueBoostSearchScore = com.mongodb.client.model.search.ValueBoostSearchScore
 
   /**
    * @see `SearchScore.boost(FieldSearchPath)`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type PathBoostSearchScore = com.mongodb.client.model.search.PathBoostSearchScore
 
   /**
    * @see `SearchScore.constant`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type ConstantSearchScore = com.mongodb.client.model.search.ConstantSearchScore
 
   /**
    * @see `SearchScore.function`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type FunctionSearchScore = com.mongodb.client.model.search.FunctionSearchScore
 
   /**
@@ -367,62 +367,62 @@ package object search {
    * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#expressions Expressions for the function score modifier]]
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type SearchScoreExpression = com.mongodb.client.model.search.SearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.relevanceExpression`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type RelevanceSearchScoreExpression = com.mongodb.client.model.search.RelevanceSearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.pathExpression`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type PathSearchScoreExpression = com.mongodb.client.model.search.PathSearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.constantExpression`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type ConstantSearchScoreExpression = com.mongodb.client.model.search.ConstantSearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.gaussExpression`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type GaussSearchScoreExpression = com.mongodb.client.model.search.GaussSearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.log`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type LogSearchScoreExpression = com.mongodb.client.model.search.LogSearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.log1p`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type Log1pSearchScoreExpression = com.mongodb.client.model.search.Log1pSearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.addExpression`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type AddSearchScoreExpression = com.mongodb.client.model.search.AddSearchScoreExpression
 
   /**
    * @see `SearchScoreExpression.multiplyExpression`
    */
   @Sealed
-  @Beta(Array(Beta.Reason.CLIENT))
+  @Beta(Array(Reason.CLIENT))
   type MultiplySearchScoreExpression = com.mongodb.client.model.search.MultiplySearchScoreExpression
 }
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala
index bf1f7b1ae5b..f57ddce32c6 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala
@@ -16,7 +16,7 @@
 
 package org.mongodb.scala.model
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.vault.{ DataKeyOptions => JDataKeyOptions }
 import com.mongodb.client.model.vault.{ EncryptOptions => JEncryptOptions }
 import com.mongodb.client.model.vault.{ RangeOptions => JRangeOptions }
@@ -60,7 +60,7 @@ package object vault {
    *
    * @since 4.9
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   type RangeOptions = JRangeOptions
 
   object RangeOptions {
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/package.scala
index b52ff13fd61..7da5578ff96 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/package.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/package.scala
@@ -16,7 +16,7 @@
 
 package org.mongodb
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import org.bson.BsonDocumentReader
 import org.bson.codecs.{ DecoderContext, DocumentCodec }
 import org.mongodb.scala.bson.BsonDocument
@@ -108,6 +108,16 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit
    */
   type TagSet = com.mongodb.TagSet
 
+  /**
+   * The timeout mode for a cursor
+   *
+   * For operations that create cursors, `timeoutMS` can either cap the lifetime of the cursor or be applied separately to the
+   * original operation and all next calls.
+   *
+   * @since 5.2
+   */
+  type TimeoutMode = com.mongodb.client.cursor.TimeoutMode
+
   /**
    * Controls the acknowledgment of write operations with various options.
    */
@@ -323,6 +333,11 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit
    */
   type MongoSocketReadTimeoutException = com.mongodb.MongoSocketReadTimeoutException
 
+  /**
+   * This exception is thrown when there is a timeout writing to a socket.
+   */
+  type MongoSocketWriteTimeoutException = com.mongodb.MongoSocketWriteTimeoutException
+
   /**
    * This exception is thrown when there is an exception writing a response to a Socket.
    */
@@ -333,6 +348,19 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit
    */
   type MongoTimeoutException = com.mongodb.MongoTimeoutException
 
+  /**
+   * Exception thrown to indicate that a MongoDB operation has exceeded the specified timeout for
+   * the full execution of operation.
+   *
+   * <p> The [[MongoOperationTimeoutException]] might provide information about the underlying
+   * cause of the timeout, if available. For example, if retries are attempted due to transient failures,
+   * and a timeout occurs in any of the attempts, the exception from one of the retries may be appended
+   * as the cause to this [[MongoOperationTimeoutException]].
+   *
+   * @since 5.0
+   */
+  type MongoOperationTimeoutException = com.mongodb.MongoOperationTimeoutException
+
   /**
    * An exception indicating a failure to apply the write concern to the requested write operation
    *
@@ -367,7 +395,7 @@ package object scala extends ClientSessionImplicits with ObservableImplicits wit
    *
    * @since 4.9
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   type MongoUpdatedEncryptedFieldsException = com.mongodb.MongoUpdatedEncryptedFieldsException
 
   /**
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala
index b4c9de4d440..3d375b56e21 100644
--- a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala
+++ b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala
@@ -16,7 +16,7 @@
 
 package org.mongodb.scala.vault
 
-import com.mongodb.annotations.Beta
+import com.mongodb.annotations.{ Beta, Reason }
 import com.mongodb.client.model.{ CreateCollectionOptions, CreateEncryptedCollectionParams }
 
 import java.io.Closeable
@@ -91,7 +91,7 @@ case class ClientEncryption(private val wrapped: JClientEncryption) extends Clos
    * @return a Publisher containing the queryable encrypted range expression
    * @since 4.9
    */
-  @Beta(Array(Beta.Reason.SERVER)) def encryptExpression(
+  @Beta(Array(Reason.SERVER)) def encryptExpression(
       expression: Document,
       options: EncryptOptions
   ): SingleObservable[Document] =
@@ -126,7 +126,7 @@ case class ClientEncryption(private val wrapped: JClientEncryption) extends Clos
    * @note Requires MongoDB 7.0 or greater.
    * @see [[https://www.mongodb.com/docs/manual/reference/command/create/ Create Command]]
    */
-  @Beta(Array(Beta.Reason.SERVER))
+  @Beta(Array(Reason.SERVER))
   def createEncryptedCollection(
       database: MongoDatabase,
       collectionName: String,
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala
index d18004e5aa5..b0edcb68b8e 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala
@@ -17,13 +17,13 @@
 package org.mongodb.scala
 
 import com.mongodb.ExplainVerbosity
-
-import java.util.concurrent.TimeUnit
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.AggregatePublisher
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
 import org.mongodb.scala.model.Collation
 import org.scalatestplus.mockito.MockitoSugar
 
+import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.Duration
 
 class AggregateObservableSpec extends BaseSpec with MockitoSugar {
@@ -59,6 +59,7 @@ class AggregateObservableSpec extends BaseSpec with MockitoSugar {
     observable.batchSize(batchSize)
     observable.explain[Document]()
     observable.explain[Document](verbosity)
+    observable.timeoutMode(TimeoutMode.ITERATION)
 
     verify(wrapper).allowDiskUse(true)
     verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
@@ -70,6 +71,7 @@ class AggregateObservableSpec extends BaseSpec with MockitoSugar {
     verify(wrapper).batchSize(batchSize)
     verify(wrapper).explain(ct)
     verify(wrapper).explain(ct, verbosity)
+    verify(wrapper).timeoutMode(TimeoutMode.ITERATION)
 
     observable.toCollection()
     verify(wrapper).toCollection
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala
index 9d1a86ee75a..b22d0d8373d 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala
@@ -87,6 +87,7 @@ class ApiAliasAndCompanionSpec extends BaseSpec {
       "AggregatePrimer",
       "RemovePrimer",
       "SyncMongoClient",
+      "SyncMongoCluster",
       "SyncGridFSBucket",
       "SyncMongoDatabase",
       "SyncClientEncryption"
@@ -104,7 +105,8 @@ class ApiAliasAndCompanionSpec extends BaseSpec {
       "package",
       "ReadConcernLevel",
       "SingleObservable",
-      "Subscription"
+      "Subscription",
+      "TimeoutMode"
     )
 
     val classFilter = (f: Class[_ <: Object]) => {
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala
index ea5a3eb5543..03c745d0ae6 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala
@@ -16,8 +16,7 @@
 
 package org.mongodb.scala
 
-import java.util.concurrent.TimeUnit
-
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ChangeStreamPublisher
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
 import org.mongodb.scala.bson.BsonTimestamp
@@ -26,6 +25,7 @@ import org.mongodb.scala.model.changestream.FullDocument
 import org.reactivestreams.Publisher
 import org.scalatestplus.mockito.MockitoSugar
 
+import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.Duration
 import scala.util.Success
 
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala
index e609f8ccdc8..e55455579b4 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala
@@ -15,16 +15,15 @@
  */
 
 package org.mongodb.scala
-import java.util.concurrent.TimeUnit
-
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.DistinctPublisher
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
 import org.mongodb.scala.model.Collation
 import org.reactivestreams.Publisher
 import org.scalatestplus.mockito.MockitoSugar
 
+import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.Duration
-
 class DistinctObservableSpec extends BaseSpec with MockitoSugar {
 
   "DistinctObservable" should "have the same methods as the wrapped DistinctObservable" in {
@@ -51,11 +50,14 @@ class DistinctObservableSpec extends BaseSpec with MockitoSugar {
     observable.maxTime(duration)
     observable.collation(collation)
     observable.batchSize(batchSize)
+    observable.timeoutMode(TimeoutMode.ITERATION)
 
     verify(wrapper).filter(filter)
     verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
     verify(wrapper).collation(collation)
     verify(wrapper).batchSize(batchSize)
+    verify(wrapper).timeoutMode(TimeoutMode.ITERATION)
+
     verifyNoMoreInteractions(wrapper)
   }
 }
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala
index 1af77eeb6e7..eaf117a1348 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala
@@ -16,6 +16,7 @@
 
 package org.mongodb.scala
 
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.FindPublisher
 import com.mongodb.{ CursorType, ExplainVerbosity }
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
@@ -75,6 +76,7 @@ class FindObservableSpec extends BaseSpec with MockitoSugar {
     observable.allowDiskUse(true)
     observable.explain[Document]()
     observable.explain[Document](verbosity)
+    observable.timeoutMode(TimeoutMode.ITERATION)
 
     verify(wrapper).collation(collation)
     verify(wrapper).cursorType(CursorType.NonTailable)
@@ -93,6 +95,8 @@ class FindObservableSpec extends BaseSpec with MockitoSugar {
     verify(wrapper).allowDiskUse(true)
     verify(wrapper).explain(ct)
     verify(wrapper).explain(ct, verbosity)
+    verify(wrapper).timeoutMode(TimeoutMode.ITERATION)
+
     verifyNoMoreInteractions(wrapper)
   }
 }
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala
index 60ebad3c597..20990f68b58 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala
@@ -16,13 +16,13 @@
 
 package org.mongodb.scala
 
-import java.util.concurrent.TimeUnit
-
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListCollectionsPublisher
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
 import org.reactivestreams.Publisher
 import org.scalatestplus.mockito.MockitoSugar
 
+import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.Duration
 
 class ListCollectionsObservableSpec extends BaseSpec with MockitoSugar {
@@ -49,10 +49,13 @@ class ListCollectionsObservableSpec extends BaseSpec with MockitoSugar {
     observable.filter(filter)
     observable.maxTime(duration)
     observable.batchSize(batchSize)
+    observable.timeoutMode(TimeoutMode.ITERATION)
 
     verify(wrapper).filter(filter)
     verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
     verify(wrapper).batchSize(batchSize)
+    verify(wrapper).timeoutMode(TimeoutMode.ITERATION)
+
     verifyNoMoreInteractions(wrapper)
   }
 }
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala
index a0d36fac78d..a80b421af85 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala
@@ -15,13 +15,13 @@
  */
 
 package org.mongodb.scala
-import java.util.concurrent.TimeUnit
-
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListDatabasesPublisher
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
 import org.reactivestreams.Publisher
 import org.scalatestplus.mockito.MockitoSugar
 
+import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.Duration
 
 class ListDatabasesObservableSpec extends BaseSpec with MockitoSugar {
@@ -48,11 +48,13 @@ class ListDatabasesObservableSpec extends BaseSpec with MockitoSugar {
     observable.filter(filter)
     observable.nameOnly(true)
     observable.batchSize(batchSize)
+    observable.timeoutMode(TimeoutMode.ITERATION)
 
     verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
     verify(wrapper).filter(filter)
     verify(wrapper).nameOnly(true)
     verify(wrapper).batchSize(batchSize)
+    verify(wrapper).timeoutMode(TimeoutMode.ITERATION)
 
     verifyNoMoreInteractions(wrapper)
   }
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala
index 29d7fbe670d..da841fe6656 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala
@@ -15,13 +15,13 @@
  */
 
 package org.mongodb.scala
-import java.util.concurrent.TimeUnit
-
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.reactivestreams.client.ListIndexesPublisher
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
 import org.reactivestreams.Publisher
 import org.scalatestplus.mockito.MockitoSugar
 
+import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.Duration
 
 class ListIndexesObservableSpec extends BaseSpec with MockitoSugar {
@@ -45,9 +45,12 @@ class ListIndexesObservableSpec extends BaseSpec with MockitoSugar {
 
     observable.maxTime(duration)
     observable.batchSize(batchSize)
+    observable.timeoutMode(TimeoutMode.ITERATION)
 
     verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
     verify(wrapper).batchSize(batchSize)
+    verify(wrapper).timeoutMode(TimeoutMode.ITERATION)
+
     verifyNoMoreInteractions(wrapper)
   }
 }
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala
index 1b8d164bd21..af08a0f0452 100644
--- a/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala
+++ b/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala
@@ -16,14 +16,14 @@
 
 package org.mongodb.scala
 
-import java.util.concurrent.TimeUnit
-
+import com.mongodb.client.cursor.TimeoutMode
 import com.mongodb.client.model.MapReduceAction
 import com.mongodb.reactivestreams.client.MapReducePublisher
 import org.mockito.Mockito.{ verify, verifyNoMoreInteractions }
 import org.mongodb.scala.model.Collation
 import org.scalatestplus.mockito.MockitoSugar
 
+import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.Duration
 
 class MapReduceObservableSpec extends BaseSpec with MockitoSugar {
@@ -63,6 +63,7 @@ class MapReduceObservableSpec extends BaseSpec with MockitoSugar {
     observable.bypassDocumentValidation(true)
     observable.collation(collation)
     observable.batchSize(batchSize)
+    observable.timeoutMode(TimeoutMode.ITERATION)
 
     verify(wrapper).filter(filter)
     verify(wrapper).scope(scope)
@@ -78,6 +79,8 @@ class MapReduceObservableSpec extends BaseSpec with MockitoSugar {
     verify(wrapper).bypassDocumentValidation(true)
     verify(wrapper).collation(collation)
     verify(wrapper).batchSize(batchSize)
+    verify(wrapper).timeoutMode(TimeoutMode.ITERATION)
+    verifyNoMoreInteractions(wrapper)
 
     observable.toCollection()
     verify(wrapper).toCollection
diff --git a/driver-sync/src/main/com/mongodb/client/AggregateIterable.java b/driver-sync/src/main/com/mongodb/client/AggregateIterable.java
index 83e232fecc4..5f7a0dc2aff 100644
--- a/driver-sync/src/main/com/mongodb/client/AggregateIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/AggregateIterable.java
@@ -17,6 +17,9 @@
 package com.mongodb.client;
 
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
@@ -62,6 +65,31 @@ public interface AggregateIterable<TResult> extends MongoIterable<TResult> {
      */
     AggregateIterable<TResult> batchSize(int batchSize);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     *     If the {@code timeout} is set then:
+     *     <ul>
+     *      <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *      <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *      to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     </ul>
+     * <p>
+     *     Will error if the timeoutMode is set to {@link TimeoutMode#ITERATION} and the pipeline contains either
+     *     an {@code $out} or a {@code $merge} stage.
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    AggregateIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Sets the maximum execution time on the server for this operation.
      *
diff --git a/driver-sync/src/main/com/mongodb/client/DistinctIterable.java b/driver-sync/src/main/com/mongodb/client/DistinctIterable.java
index f044a96ab41..9206b7d3094 100644
--- a/driver-sync/src/main/com/mongodb/client/DistinctIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/DistinctIterable.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
@@ -88,4 +91,19 @@ public interface DistinctIterable<TResult> extends MongoIterable<TResult> {
      * @mongodb.server.release 4.4
      */
     DistinctIterable<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    DistinctIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
 }
diff --git a/driver-sync/src/main/com/mongodb/client/FindIterable.java b/driver-sync/src/main/com/mongodb/client/FindIterable.java
index 4cd3c7b7f43..d610ed73ffa 100644
--- a/driver-sync/src/main/com/mongodb/client/FindIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/FindIterable.java
@@ -18,6 +18,9 @@
 
 import com.mongodb.CursorType;
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.client.model.Projections;
 import com.mongodb.lang.Nullable;
@@ -261,6 +264,28 @@ public interface FindIterable<TResult> extends MongoIterable<TResult> {
      */
     FindIterable<TResult> allowDiskUse(@Nullable Boolean allowDiskUse);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     *     If the {@code timeout} is set then:
+     *     <ul>
+     *      <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *      <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *      to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     </ul>
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    FindIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Explain the execution plan for this operation with the server's default verbosity level
      *
diff --git a/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java b/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java
index 52480103d07..421fbcaa674 100644
--- a/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 import org.bson.conversions.Bson;
@@ -79,4 +82,18 @@ public interface ListCollectionsIterable<TResult> extends MongoIterable<TResult>
      * @mongodb.server.release 4.4
      */
     ListCollectionsIterable<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListCollectionsIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
 }
diff --git a/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java b/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java
index 9b344a6ae89..75625e487a0 100644
--- a/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 import org.bson.conversions.Bson;
@@ -101,4 +104,18 @@ public interface ListDatabasesIterable<TResult> extends MongoIterable<TResult> {
      * @mongodb.server.release 4.4
      */
     ListDatabasesIterable<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListDatabasesIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
 }
diff --git a/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java b/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java
index 2b3de183d64..160cb59ebd9 100644
--- a/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 
@@ -68,4 +71,18 @@ public interface ListIndexesIterable<TResult> extends MongoIterable<TResult> {
      * @mongodb.server.release 4.4
      */
     ListIndexesIterable<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListIndexesIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
 }
diff --git a/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java
index 1cd61add5a0..2384fcef29d 100644
--- a/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java
@@ -17,7 +17,10 @@
 package com.mongodb.client;
 
 import com.mongodb.ExplainVerbosity;
+import com.mongodb.annotations.Alpha;
 import com.mongodb.annotations.Evolving;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
@@ -98,6 +101,20 @@ public interface ListSearchIndexesIterable<TResult> extends MongoIterable<TResul
      */
     ListSearchIndexesIterable<TResult> comment(@Nullable BsonValue comment);
 
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    ListSearchIndexesIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
+
     /**
      * Explain the execution plan for this operation with the server's default verbosity level.
      *
diff --git a/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java b/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java
index 30706dd6373..d406e785da7 100644
--- a/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java
@@ -16,6 +16,9 @@
 
 package com.mongodb.client;
 
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
 import com.mongodb.lang.Nullable;
 import org.bson.conversions.Bson;
@@ -179,4 +182,18 @@ public interface MapReduceIterable<TResult> extends MongoIterable<TResult> {
      * @mongodb.server.release 3.4
      */
     MapReduceIterable<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     *     Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     *     via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    MapReduceIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
 }
diff --git a/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java b/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java
index 38e33c8ae8e..ed58412496d 100644
--- a/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java
+++ b/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java
@@ -33,6 +33,16 @@
  * }
  * }</pre>
  *
+ *
+ * <p>
+ * A {@link com.mongodb.MongoOperationTimeoutException} does not invalidate the {@link MongoChangeStreamCursor}, but is immediately
+ * propagated to the caller. Subsequent method call will attempt to resume operation by establishing a new change stream on the server,
+ * without doing {@code getMore} request first. </p>
+ * <p>
+ * If a {@link com.mongodb.MongoOperationTimeoutException} occurs before any events are received, it indicates that the server
+ * has timed out before it could finish processing the existing oplog. In such cases, it is recommended to close the current stream
+ * and recreate it with a higher timeout setting. </p>
+ *
  * @since 3.11
  * @param <TResult> The type of documents the cursor contains
  */
diff --git a/driver-sync/src/main/com/mongodb/client/MongoClient.java b/driver-sync/src/main/com/mongodb/client/MongoClient.java
index c0b0565df81..14519e2413a 100644
--- a/driver-sync/src/main/com/mongodb/client/MongoClient.java
+++ b/driver-sync/src/main/com/mongodb/client/MongoClient.java
@@ -16,17 +16,12 @@
 
 package com.mongodb.client;
 
-import com.mongodb.ClientSessionOptions;
-import com.mongodb.MongoNamespace;
 import com.mongodb.annotations.Immutable;
 import com.mongodb.connection.ClusterDescription;
 import com.mongodb.connection.ClusterSettings;
 import com.mongodb.event.ClusterListener;
-import org.bson.Document;
-import org.bson.conversions.Bson;
 
 import java.io.Closeable;
-import java.util.List;
 
 /**
  * A client-side representation of a MongoDB cluster.  Instances can represent either a standalone MongoDB instance, a replica set,
@@ -42,38 +37,7 @@
  * @since 3.7
  */
 @Immutable
-public interface MongoClient extends Closeable {
-
-    /**
-     * Gets a {@link MongoDatabase} instance for the given database name.
-     *
-     * @param databaseName the name of the database to retrieve
-     * @return a {@code MongoDatabase} representing the specified database
-     * @throws IllegalArgumentException if databaseName is invalid
-     * @see MongoNamespace#checkDatabaseNameValidity(String)
-     */
-    MongoDatabase getDatabase(String databaseName);
-
-    /**
-     * Creates a client session with default options.
-     *
-     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
-     *
-     * @return the client session
-     * @mongodb.server.release 3.6
-     */
-    ClientSession startSession();
-
-    /**
-     * Creates a client session.
-     *
-     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
-     *
-     * @param options  the options for the client session
-     * @return the client session
-     * @mongodb.server.release 3.6
-     */
-    ClientSession startSession(ClientSessionOptions options);
+public interface MongoClient extends MongoCluster, Closeable {
 
     /**
      * Close the client, which will close all underlying cached resources, including, for example,
@@ -81,158 +45,6 @@ public interface MongoClient extends Closeable {
      */
     void close();
 
-    /**
-     * Get a list of the database names
-     *
-     * @return an iterable containing all the names of all the databases
-     * @mongodb.driver.manual reference/command/listDatabases List Databases
-     */
-    MongoIterable<String> listDatabaseNames();
-
-    /**
-     * Get a list of the database names
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return an iterable containing all the names of all the databases
-     * @mongodb.driver.manual reference/command/listDatabases List Databases
-     * @mongodb.server.release 3.6
-     */
-    MongoIterable<String> listDatabaseNames(ClientSession clientSession);
-
-    /**
-     * Gets the list of databases
-     *
-     * @return the list databases iterable interface
-     */
-    ListDatabasesIterable<Document> listDatabases();
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the list databases iterable interface
-     * @mongodb.driver.manual reference/command/listDatabases List Databases
-     * @mongodb.server.release 3.6
-     */
-    ListDatabasesIterable<Document> listDatabases(ClientSession clientSession);
-
-    /**
-     * Gets the list of databases
-     *
-     * @param resultClass the class to cast the database documents to
-     * @param <TResult>   the type of the class to use instead of {@code Document}.
-     * @return the list databases iterable interface
-     */
-    <TResult> ListDatabasesIterable<TResult> listDatabases(Class<TResult> resultClass);
-
-    /**
-     * Gets the list of databases
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param resultClass the class to cast the database documents to
-     * @param <TResult>   the type of the class to use instead of {@code Document}.
-     * @return the list databases iterable interface
-     * @mongodb.driver.manual reference/command/listDatabases List Databases
-     * @mongodb.server.release 3.6
-     */
-    <TResult> ListDatabasesIterable<TResult> listDatabases(ClientSession clientSession, Class<TResult> resultClass);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     */
-    ChangeStreamIterable<Document> watch();
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     */
-    <TResult> ChangeStreamIterable<TResult> watch(Class<TResult> resultClass);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param pipeline the aggregation pipeline to apply to the change stream.
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     */
-    ChangeStreamIterable<Document> watch(List<? extends Bson> pipeline);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param pipeline    the aggregation pipeline to apply to the change stream
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     */
-    <TResult> ChangeStreamIterable<TResult> watch(List<? extends Bson> pipeline, Class<TResult> resultClass);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @return the change stream iterable
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    ChangeStreamIterable<Document> watch(ClientSession clientSession);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    <TResult> ChangeStreamIterable<TResult> watch(ClientSession clientSession, Class<TResult> resultClass);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline the aggregation pipeline to apply to the change stream.
-     * @return the change stream iterable
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    ChangeStreamIterable<Document> watch(ClientSession clientSession, List<? extends Bson> pipeline);
-
-    /**
-     * Creates a change stream for this client.
-     *
-     * @param clientSession the client session with which to associate this operation
-     * @param pipeline    the aggregation pipeline to apply to the change stream
-     * @param resultClass the class to decode each document into
-     * @param <TResult>   the target document type of the iterable.
-     * @return the change stream iterable
-     * @since 3.8
-     * @mongodb.server.release 4.0
-     * @mongodb.driver.dochub core/changestreams Change Streams
-     */
-    <TResult> ChangeStreamIterable<TResult> watch(ClientSession clientSession, List<? extends Bson> pipeline, Class<TResult> resultClass);
-
     /**
      * Gets the current cluster description.
      *
diff --git a/driver-sync/src/main/com/mongodb/client/MongoCluster.java b/driver-sync/src/main/com/mongodb/client/MongoCluster.java
new file mode 100644
index 00000000000..f901845333b
--- /dev/null
+++ b/driver-sync/src/main/com/mongodb/client/MongoCluster.java
@@ -0,0 +1,355 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.MongoNamespace;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Immutable;
+import com.mongodb.annotations.Reason;
+import com.mongodb.lang.Nullable;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * The client-side representation of MongoDB cluster operations.
+ *
+ * <p>
+ * The originating {@link MongoClient} is responsible for the closing of resources.
+ * If the originating {@link MongoClient} is closed, then any cluster operations will fail.
+ * </p>
+ *
+ * @see MongoClient
+ * @since 5.2
+ */
+@Immutable
+public interface MongoCluster {
+
+    /**
+     * Get the codec registry for the MongoCluster.
+     *
+     * @return the {@link org.bson.codecs.configuration.CodecRegistry}
+     * @since 5.2
+     */
+    CodecRegistry getCodecRegistry();
+
+    /**
+     * Get the read preference for the MongoCluster.
+     *
+     * @return the {@link com.mongodb.ReadPreference}
+     * @since 5.2
+     */
+    ReadPreference getReadPreference();
+
+    /**
+     * Get the write concern for the MongoCluster.
+     *
+     * @return the {@link com.mongodb.WriteConcern}
+     * @since 5.2
+     */
+    WriteConcern getWriteConcern();
+
+    /**
+     * Get the read concern for the MongoCluster.
+     *
+     * @return the {@link com.mongodb.ReadConcern}
+     * @since 5.2
+     * @mongodb.driver.manual reference/readConcern/ Read Concern
+     */
+    ReadConcern getReadConcern();
+
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
+    /**
+     * Create a new MongoCluster instance with a different codec registry.
+     *
+     * <p>The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+     * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+     * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+     * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.</p>
+     *
+     * @param codecRegistry the new {@link org.bson.codecs.configuration.CodecRegistry} for the cluster
+     * @return a new MongoCluster instance with the different codec registry
+     * @see org.bson.codecs.configuration.CodecRegistries
+     * @since 5.2
+     */
+    MongoCluster withCodecRegistry(CodecRegistry codecRegistry);
+
+    /**
+     * Create a new MongoCluster instance with a different read preference.
+     *
+     * @param readPreference the new {@link ReadPreference} for the cluster
+     * @return a new MongoCluster instance with the different readPreference
+     * @since 5.2
+     */
+    MongoCluster withReadPreference(ReadPreference readPreference);
+
+    /**
+     * Create a new MongoCluster instance with a different write concern.
+     *
+     * @param writeConcern the new {@link WriteConcern} for the cluster
+     * @return a new MongoCluster instance with the different writeConcern
+     * @since 5.2
+     */
+    MongoCluster withWriteConcern(WriteConcern writeConcern);
+
+    /**
+     * Create a new MongoCluster instance with a different read concern.
+     *
+     * @param readConcern the new {@link ReadConcern} for the cluster
+     * @return a new MongoCluster instance with the different ReadConcern
+     * @since 5.2
+     * @mongodb.driver.manual reference/readConcern/ Read Concern
+     */
+    MongoCluster withReadConcern(ReadConcern readConcern);
+
+    /**
+     * Create a new MongoCluster instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new MongoCluster instance with the set time limit for the full execution of an operation.
+     * @since 5.2
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    MongoCluster withTimeout(long timeout, TimeUnit timeUnit);
+
+    /**
+     * Gets a {@link MongoDatabase} instance for the given database name.
+     *
+     * @param databaseName the name of the database to retrieve
+     * @return a {@code MongoDatabase} representing the specified database
+     * @throws IllegalArgumentException if databaseName is invalid
+     * @see MongoNamespace#checkDatabaseNameValidity(String)
+     */
+    MongoDatabase getDatabase(String databaseName);
+
+    /**
+     * Creates a client session with default options.
+     *
+     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
+     *
+     * @return the client session
+     * @mongodb.server.release 3.6
+     */
+    ClientSession startSession();
+
+    /**
+     * Creates a client session.
+     *
+     * <p>Note: A ClientSession instance can not be used concurrently in multiple operations.</p>
+     *
+     * @param options  the options for the client session
+     * @return the client session
+     * @mongodb.server.release 3.6
+     */
+    ClientSession startSession(ClientSessionOptions options);
+
+    /**
+     * Get a list of the database names
+     *
+     * @return an iterable containing all the names of all the databases
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     */
+    MongoIterable<String> listDatabaseNames();
+
+    /**
+     * Get a list of the database names
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return an iterable containing all the names of all the databases
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     * @mongodb.server.release 3.6
+     */
+    MongoIterable<String> listDatabaseNames(ClientSession clientSession);
+
+    /**
+     * Gets the list of databases
+     *
+     * @return the list databases iterable interface
+     */
+    ListDatabasesIterable<Document> listDatabases();
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list databases iterable interface
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     * @mongodb.server.release 3.6
+     */
+    ListDatabasesIterable<Document> listDatabases(ClientSession clientSession);
+
+    /**
+     * Gets the list of databases
+     *
+     * @param resultClass the class to cast the database documents to
+     * @param <TResult>   the type of the class to use instead of {@code Document}.
+     * @return the list databases iterable interface
+     */
+    <TResult> ListDatabasesIterable<TResult> listDatabases(Class<TResult> resultClass);
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the class to cast the database documents to
+     * @param <TResult>   the type of the class to use instead of {@code Document}.
+     * @return the list databases iterable interface
+     * @mongodb.driver.manual reference/command/listDatabases List Databases
+     * @mongodb.server.release 3.6
+     */
+    <TResult> ListDatabasesIterable<TResult> listDatabases(ClientSession clientSession, Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    ChangeStreamIterable<Document> watch();
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    <TResult> ChangeStreamIterable<TResult> watch(Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param pipeline the aggregation pipeline to apply to the change stream.
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    ChangeStreamIterable<Document> watch(List<? extends Bson> pipeline);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param pipeline    the aggregation pipeline to apply to the change stream
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     */
+    <TResult> ChangeStreamIterable<TResult> watch(List<? extends Bson> pipeline, Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    ChangeStreamIterable<Document> watch(ClientSession clientSession);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    <TResult> ChangeStreamIterable<TResult> watch(ClientSession clientSession, Class<TResult> resultClass);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream.
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    ChangeStreamIterable<Document> watch(ClientSession clientSession, List<? extends Bson> pipeline);
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline    the aggregation pipeline to apply to the change stream
+     * @param resultClass the class to decode each document into
+     * @param <TResult>   the target document type of the iterable.
+     * @return the change stream iterable
+     * @since 3.8
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.dochub core/changestreams Change Streams
+     */
+    <TResult> ChangeStreamIterable<TResult> watch(ClientSession clientSession, List<? extends Bson> pipeline, Class<TResult> resultClass);
+}
diff --git a/driver-sync/src/main/com/mongodb/client/MongoCollection.java b/driver-sync/src/main/com/mongodb/client/MongoCollection.java
index aa772960e65..7db38040bed 100644
--- a/driver-sync/src/main/com/mongodb/client/MongoCollection.java
+++ b/driver-sync/src/main/com/mongodb/client/MongoCollection.java
@@ -20,6 +20,8 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.bulk.BulkWriteResult;
 import com.mongodb.client.model.BulkWriteOptions;
@@ -51,6 +53,7 @@
 import org.bson.conversions.Bson;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 /**
  * The MongoCollection interface.
@@ -112,6 +115,37 @@ public interface MongoCollection<TDocument> {
      */
     ReadConcern getReadConcern();
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
     /**
      * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into..
      *
@@ -162,6 +196,23 @@ public interface MongoCollection<TDocument> {
      */
     MongoCollection<TDocument> withReadConcern(ReadConcern readConcern);
 
+    /**
+     * Create a new MongoCollection instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new MongoCollection instance with the set time limit for the full execution of an operation
+     * @since 5.2
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    MongoCollection<TDocument> withTimeout(long timeout, TimeUnit timeUnit);
+
     /**
      * Counts the number of documents in the collection.
      *
diff --git a/driver-sync/src/main/com/mongodb/client/MongoDatabase.java b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java
index 364f7377d4a..1e84a91005a 100644
--- a/driver-sync/src/main/com/mongodb/client/MongoDatabase.java
+++ b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java
@@ -19,14 +19,18 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.CreateViewOptions;
+import com.mongodb.lang.Nullable;
 import org.bson.Document;
 import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
 
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 /**
  * The MongoDatabase interface.
@@ -76,6 +80,37 @@ public interface MongoDatabase {
      */
     ReadConcern getReadConcern();
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
     /**
      * Create a new MongoDatabase instance with a different codec registry.
      *
@@ -117,6 +152,23 @@ public interface MongoDatabase {
      */
     MongoDatabase withReadConcern(ReadConcern readConcern);
 
+    /**
+     * Create a new MongoDatabase instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new MongoDatabase instance with the set time limit for the full execution of an operation.
+     * @since 5.2
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    MongoDatabase withTimeout(long timeout, TimeUnit timeUnit);
+
     /**
      * Gets a collection.
      *
@@ -140,6 +192,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command the command to be run
      * @return the command result
      */
@@ -148,6 +203,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command        the command to be run
      * @param readPreference the {@link ReadPreference} to be used when executing the command
      * @return the command result
@@ -157,6 +215,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command     the command to be run
      * @param resultClass the class to decode each document into
      * @param <TResult> the type of the class to use instead of {@code Document}.
@@ -167,6 +228,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param command        the command to be run
      * @param readPreference the {@link ReadPreference} to be used when executing the command
      * @param resultClass    the class to decode each document into
@@ -178,6 +242,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command the command to be run
      * @return the command result
@@ -189,6 +256,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command        the command to be run
      * @param readPreference the {@link ReadPreference} to be used when executing the command
@@ -201,6 +271,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param command     the command to be run
      * @param resultClass the class to decode each document into
@@ -214,6 +287,9 @@ public interface MongoDatabase {
     /**
      * Executes the given command in the context of the current database with the given read preference.
      *
+     * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+     * {@code timeoutMS} setting has been set.</p>
+     *
      * @param clientSession  the client session with which to associate this operation
      * @param command        the command to be run
      * @param readPreference the {@link ReadPreference} to be used when executing the command
diff --git a/driver-sync/src/main/com/mongodb/client/MongoIterable.java b/driver-sync/src/main/com/mongodb/client/MongoIterable.java
index 75ca9c34e6d..06bec548c77 100644
--- a/driver-sync/src/main/com/mongodb/client/MongoIterable.java
+++ b/driver-sync/src/main/com/mongodb/client/MongoIterable.java
@@ -74,4 +74,5 @@ public interface MongoIterable<TResult> extends Iterable<TResult> {
      * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
      */
     MongoIterable<TResult> batchSize(int batchSize);
+
 }
diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java
index c32f114844c..5335ed4ce91 100644
--- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java
+++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java
@@ -19,16 +19,21 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
 import com.mongodb.annotations.ThreadSafe;
 import com.mongodb.client.ClientSession;
+import com.mongodb.client.MongoDatabase;
 import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
 import com.mongodb.client.gridfs.model.GridFSUploadOptions;
+import com.mongodb.lang.Nullable;
 import org.bson.BsonValue;
 import org.bson.conversions.Bson;
 import org.bson.types.ObjectId;
 
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Represents a GridFS Bucket
@@ -76,6 +81,37 @@ public interface GridFSBucket {
      */
     ReadConcern getReadConcern();
 
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+     *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *   </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 4.x
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
     /**
      *  Create a new GridFSBucket instance with a new chunk size in bytes.
      *
@@ -111,6 +147,23 @@ public interface GridFSBucket {
      */
     GridFSBucket withReadConcern(ReadConcern readConcern);
 
+    /**
+     * Create a new GridFSBucket instance with the set time limit for the full execution of an operation.
+     *
+     * <ul>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *    <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit
+     * @return a new GridFSBucket instance with the set time limit for the full execution of an operation
+     * @since 4.x
+     * @see #getTimeout
+     */
+    @Alpha(Reason.CLIENT)
+    GridFSBucket withTimeout(long timeout, TimeUnit timeUnit);
+
     /**
      * Opens a Stream that the application can write the contents of the file to.
      *<p>
@@ -296,6 +349,10 @@ public interface GridFSBucket {
      * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
      *</p>
      *
+     * <p> Note: When this {@link GridFSBucket} is set with an operation timeout (via timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+     * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.</p>
+     *
      * @param id the custom id value of the file
      * @param filename the filename for the stream
      * @param source the Stream providing the file data
@@ -310,6 +367,10 @@ public interface GridFSBucket {
      * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
      * </p>
      *
+     * <p> Note: When this {@link GridFSBucket} is set with an operation timeout (via timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+     * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.</p>
+     *
      * @param id the custom id value of the file
      * @param filename the filename for the stream
      * @param source the Stream providing the file data
@@ -325,6 +386,10 @@ public interface GridFSBucket {
      * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
      *</p>
      *
+     * <p> Note: When this {@link GridFSBucket} is set with an operation timeout (via timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+     * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param filename the filename for the stream
      * @param source the Stream providing the file data
@@ -341,6 +406,10 @@ public interface GridFSBucket {
      * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
      * </p>
      *
+     * <p> Note: When this {@link GridFSBucket} is set with an operation timeout (via timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+     * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param filename the filename for the stream
      * @param source the Stream providing the file data
@@ -358,6 +427,10 @@ public interface GridFSBucket {
      * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
      *</p>
      *
+     * <p> Note: When this {@link GridFSBucket} is set with an operation timeout (via timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+     * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param id the custom id value of the file
      * @param filename the filename for the stream
@@ -374,6 +447,10 @@ public interface GridFSBucket {
      * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
      * </p>
      *
+     * <p> Note: When this {@link GridFSBucket} is set with an operation timeout (via timeout inherited from {@link MongoDatabase}
+     * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+     * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.</p>
+     *
      * @param clientSession the client session with which to associate this operation
      * @param id the custom id value of the file
      * @param filename the filename for the stream
diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java
index 963093af6f7..20ac8fc6d44 100644
--- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java
@@ -18,6 +18,7 @@
 
 import com.mongodb.MongoClientSettings;
 import com.mongodb.MongoGridFSException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
@@ -26,12 +27,17 @@
 import com.mongodb.client.ListIndexesIterable;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
 import com.mongodb.client.gridfs.model.GridFSFile;
 import com.mongodb.client.gridfs.model.GridFSUploadOptions;
+import com.mongodb.client.internal.TimeoutHelper;
 import com.mongodb.client.model.IndexOptions;
 import com.mongodb.client.result.DeleteResult;
 import com.mongodb.client.result.UpdateResult;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.VisibleForTesting;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 import org.bson.BsonObjectId;
@@ -46,14 +52,17 @@
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.ReadPreference.primary;
 import static com.mongodb.assertions.Assertions.notNull;
 import static java.lang.String.format;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
 
 final class GridFSBucketImpl implements GridFSBucket {
     private static final int DEFAULT_CHUNKSIZE_BYTES = 255 * 1024;
+    private static final String TIMEOUT_MESSAGE = "GridFS operation exceeded the timeout limit.";
     private final String bucketName;
     private final int chunkSizeBytes;
     private final MongoCollection<GridFSFile> filesCollection;
@@ -70,6 +79,7 @@ final class GridFSBucketImpl implements GridFSBucket {
                 getChunksCollection(database, bucketName));
     }
 
+    @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE)
     GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection<GridFSFile> filesCollection,
                      final MongoCollection<BsonDocument> chunksCollection) {
         this.bucketName = notNull("bucketName", bucketName);
@@ -103,6 +113,11 @@ public ReadConcern getReadConcern() {
         return filesCollection.getReadConcern();
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return filesCollection.getTimeout(timeUnit);
+    }
+
     @Override
     public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) {
         return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection, chunksCollection);
@@ -126,6 +141,12 @@ public GridFSBucket withReadConcern(final ReadConcern readConcern) {
                 chunksCollection.withReadConcern(readConcern));
     }
 
+    @Override
+    public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withTimeout(timeout, timeUnit),
+                chunksCollection.withTimeout(timeout, timeUnit));
+    }
+
     @Override
     public GridFSUploadStream openUploadStream(final String filename) {
         return openUploadStream(new BsonObjectId(), filename);
@@ -176,12 +197,14 @@ public GridFSUploadStream openUploadStream(final ClientSession clientSession, fi
 
     private GridFSUploadStream createGridFSUploadStream(@Nullable final ClientSession clientSession, final BsonValue id,
                                                         final String filename, final GridFSUploadOptions options) {
+        Timeout operationTimeout = startTimeout();
         notNull("options", options);
         Integer chunkSizeBytes = options.getChunkSizeBytes();
         int chunkSize = chunkSizeBytes == null ? this.chunkSizeBytes : chunkSizeBytes;
-        checkCreateIndex(clientSession);
-        return new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, id, filename, chunkSize,
-                options.getMetadata());
+        checkCreateIndex(clientSession, operationTimeout);
+        return new GridFSUploadStreamImpl(clientSession, filesCollection,
+                chunksCollection, id, filename, chunkSize,
+                options.getMetadata(), operationTimeout);
     }
 
     @Override
@@ -257,7 +280,10 @@ public GridFSDownloadStream openDownloadStream(final ObjectId id) {
 
     @Override
     public GridFSDownloadStream openDownloadStream(final BsonValue id) {
-        return createGridFSDownloadStream(null, getFileInfoById(null, id));
+        Timeout operationTimeout = startTimeout();
+
+        GridFSFile fileInfo = getFileInfoById(null, id, operationTimeout);
+        return createGridFSDownloadStream(null, fileInfo, operationTimeout);
     }
 
     @Override
@@ -267,7 +293,9 @@ public GridFSDownloadStream openDownloadStream(final String filename) {
 
     @Override
     public GridFSDownloadStream openDownloadStream(final String filename, final GridFSDownloadOptions options) {
-        return createGridFSDownloadStream(null, getFileByName(null, filename, options));
+        Timeout operationTimeout = startTimeout();
+        GridFSFile file = getFileByName(null, filename, options, operationTimeout);
+        return createGridFSDownloadStream(null, file, operationTimeout);
     }
 
     @Override
@@ -278,7 +306,9 @@ public GridFSDownloadStream openDownloadStream(final ClientSession clientSession
     @Override
     public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final BsonValue id) {
         notNull("clientSession", clientSession);
-        return createGridFSDownloadStream(clientSession, getFileInfoById(clientSession, id));
+        Timeout operationTimeout = startTimeout();
+        GridFSFile fileInfoById = getFileInfoById(clientSession, id, operationTimeout);
+        return createGridFSDownloadStream(clientSession, fileInfoById, operationTimeout);
     }
 
     @Override
@@ -290,11 +320,14 @@ public GridFSDownloadStream openDownloadStream(final ClientSession clientSession
     public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final String filename,
                                                    final GridFSDownloadOptions options) {
         notNull("clientSession", clientSession);
-        return createGridFSDownloadStream(clientSession, getFileByName(clientSession, filename, options));
+        Timeout operationTimeout = startTimeout();
+        GridFSFile file = getFileByName(clientSession, filename, options, operationTimeout);
+        return createGridFSDownloadStream(clientSession, file, operationTimeout);
     }
 
-    private GridFSDownloadStream createGridFSDownloadStream(@Nullable final ClientSession clientSession, final GridFSFile gridFSFile) {
-        return new GridFSDownloadStreamImpl(clientSession, gridFSFile, chunksCollection);
+    private GridFSDownloadStream createGridFSDownloadStream(@Nullable final ClientSession clientSession, final GridFSFile gridFSFile,
+                                                            @Nullable final Timeout operationTimeout) {
+        return new GridFSDownloadStreamImpl(clientSession, gridFSFile, chunksCollection, operationTimeout);
     }
 
     @Override
@@ -365,7 +398,12 @@ public GridFSFindIterable find(final ClientSession clientSession, final Bson fil
     }
 
     private GridFSFindIterable createGridFSFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter) {
-        return new GridFSFindIterableImpl(createFindIterable(clientSession, filter));
+        return new GridFSFindIterableImpl(createFindIterable(clientSession, filter, startTimeout()));
+    }
+
+    private GridFSFindIterable createGridFSFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter,
+                                                        @Nullable final Timeout operationTimeout) {
+        return new GridFSFindIterableImpl(createFindIterable(clientSession, filter, operationTimeout));
     }
 
     @Override
@@ -390,13 +428,18 @@ public void delete(final ClientSession clientSession, final BsonValue id) {
     }
 
     private void executeDelete(@Nullable final ClientSession clientSession, final BsonValue id) {
+        Timeout operationTimeout = startTimeout();
         DeleteResult result;
         if (clientSession != null) {
-            result = filesCollection.deleteOne(clientSession, new BsonDocument("_id", id));
-            chunksCollection.deleteMany(clientSession, new BsonDocument("files_id", id));
+            result = withNullableTimeout(filesCollection, operationTimeout)
+                    .deleteOne(clientSession, new BsonDocument("_id", id));
+            withNullableTimeout(chunksCollection, operationTimeout)
+                    .deleteMany(clientSession, new BsonDocument("files_id", id));
         } else {
-            result = filesCollection.deleteOne(new BsonDocument("_id", id));
-            chunksCollection.deleteMany(new BsonDocument("files_id", id));
+            result = withNullableTimeout(filesCollection, operationTimeout)
+                    .deleteOne(new BsonDocument("_id", id));
+            withNullableTimeout(chunksCollection, operationTimeout)
+                    .deleteMany(new BsonDocument("files_id", id));
         }
 
         if (result.wasAcknowledged() && result.getDeletedCount() == 0) {
@@ -426,12 +469,13 @@ public void rename(final ClientSession clientSession, final BsonValue id, final
     }
 
     private void executeRename(@Nullable final ClientSession clientSession, final BsonValue id, final String newFilename) {
+        Timeout operationTimeout = startTimeout();
         UpdateResult updateResult;
         if (clientSession != null) {
-            updateResult = filesCollection.updateOne(clientSession, new BsonDocument("_id", id),
+            updateResult = withNullableTimeout(filesCollection, operationTimeout).updateOne(clientSession, new BsonDocument("_id", id),
                     new BsonDocument("$set", new BsonDocument("filename", new BsonString(newFilename))));
         } else {
-            updateResult = filesCollection.updateOne(new BsonDocument("_id", id),
+            updateResult = withNullableTimeout(filesCollection, operationTimeout).updateOne(new BsonDocument("_id", id),
                     new BsonDocument("$set", new BsonDocument("filename", new BsonString(newFilename))));
         }
 
@@ -442,15 +486,17 @@ private void executeRename(@Nullable final ClientSession clientSession, final Bs
 
     @Override
     public void drop() {
-        filesCollection.drop();
-        chunksCollection.drop();
+        Timeout operationTimeout = startTimeout();
+        withNullableTimeout(filesCollection, operationTimeout).drop();
+        withNullableTimeout(chunksCollection, operationTimeout).drop();
     }
 
     @Override
     public void drop(final ClientSession clientSession) {
+        Timeout operationTimeout = startTimeout();
         notNull("clientSession", clientSession);
-        filesCollection.drop(clientSession);
-        chunksCollection.drop(clientSession);
+        withNullableTimeout(filesCollection, operationTimeout).drop(clientSession);
+        withNullableTimeout(chunksCollection, operationTimeout).drop(clientSession);
     }
 
     private static MongoCollection<GridFSFile> getFilesCollection(final MongoDatabase database, final String bucketName) {
@@ -463,37 +509,45 @@ private static MongoCollection<BsonDocument> getChunksCollection(final MongoData
         return database.getCollection(bucketName + ".chunks", BsonDocument.class).withCodecRegistry(MongoClientSettings.getDefaultCodecRegistry());
     }
 
-    private void checkCreateIndex(@Nullable final ClientSession clientSession) {
+    private void checkCreateIndex(@Nullable final ClientSession clientSession, @Nullable final Timeout operationTimeout) {
         if (!checkedIndexes) {
-            if (collectionIsEmpty(clientSession, filesCollection.withDocumentClass(Document.class).withReadPreference(primary()))) {
+            if (collectionIsEmpty(clientSession,
+                    filesCollection.withDocumentClass(Document.class).withReadPreference(primary()),
+                    operationTimeout)) {
+
                 Document filesIndex = new Document("filename", 1).append("uploadDate", 1);
-                if (!hasIndex(clientSession, filesCollection.withReadPreference(primary()), filesIndex)) {
-                    createIndex(clientSession, filesCollection, filesIndex, new IndexOptions());
+                if (!hasIndex(clientSession, filesCollection.withReadPreference(primary()), filesIndex, operationTimeout)) {
+                    createIndex(clientSession, filesCollection, filesIndex, new IndexOptions(), operationTimeout);
                 }
                 Document chunksIndex = new Document("files_id", 1).append("n", 1);
-                if (!hasIndex(clientSession, chunksCollection.withReadPreference(primary()), chunksIndex)) {
-                    createIndex(clientSession, chunksCollection, chunksIndex, new IndexOptions().unique(true));
+                if (!hasIndex(clientSession, chunksCollection.withReadPreference(primary()), chunksIndex, operationTimeout)) {
+                    createIndex(clientSession, chunksCollection, chunksIndex, new IndexOptions().unique(true), operationTimeout);
                 }
             }
             checkedIndexes = true;
         }
     }
 
-    private <T> boolean collectionIsEmpty(@Nullable final ClientSession clientSession, final MongoCollection<T> collection) {
+    private <T> boolean collectionIsEmpty(@Nullable final ClientSession clientSession,
+                                          final MongoCollection<T> collection,
+                                          @Nullable final Timeout operationTimeout) {
         if (clientSession != null) {
-            return collection.find(clientSession).projection(new Document("_id", 1)).first() == null;
+            return withNullableTimeout(collection, operationTimeout)
+                    .find(clientSession).projection(new Document("_id", 1)).first() == null;
         } else {
-            return collection.find().projection(new Document("_id", 1)).first() == null;
+            return withNullableTimeout(collection, operationTimeout)
+                    .find().projection(new Document("_id", 1)).first() == null;
         }
     }
 
-    private <T> boolean hasIndex(@Nullable final ClientSession clientSession, final MongoCollection<T> collection, final Document index) {
+    private <T> boolean hasIndex(@Nullable final ClientSession clientSession, final MongoCollection<T> collection,
+                                 final Document index, @Nullable final Timeout operationTimeout) {
         boolean hasIndex = false;
         ListIndexesIterable<Document> listIndexesIterable;
         if (clientSession != null) {
-            listIndexesIterable = collection.listIndexes(clientSession);
+            listIndexesIterable = withNullableTimeout(collection, operationTimeout).listIndexes(clientSession);
         } else {
-            listIndexesIterable = collection.listIndexes();
+            listIndexesIterable = withNullableTimeout(collection, operationTimeout).listIndexes();
         }
 
         ArrayList<Document> indexes = listIndexesIterable.into(new ArrayList<>());
@@ -513,16 +567,16 @@ private <T> boolean hasIndex(@Nullable final ClientSession clientSession, final
     }
 
     private <T> void createIndex(@Nullable final ClientSession clientSession, final MongoCollection<T> collection, final Document index,
-                                 final IndexOptions indexOptions) {
-       if (clientSession != null) {
-           collection.createIndex(clientSession, index, indexOptions);
-       } else {
-           collection.createIndex(index, indexOptions);
-       }
+                                 final IndexOptions indexOptions, final @Nullable Timeout operationTimeout) {
+        if (clientSession != null) {
+            withNullableTimeout(collection, operationTimeout).createIndex(clientSession, index, indexOptions);
+        } else {
+            withNullableTimeout(collection, operationTimeout).createIndex(index, indexOptions);
+        }
     }
 
     private GridFSFile getFileByName(@Nullable final ClientSession clientSession, final String filename,
-                                     final GridFSDownloadOptions options) {
+                                     final GridFSDownloadOptions options, @Nullable final Timeout operationTimeout) {
         int revision = options.getRevision();
         int skip;
         int sort;
@@ -534,7 +588,7 @@ private GridFSFile getFileByName(@Nullable final ClientSession clientSession, fi
             sort = -1;
         }
 
-        GridFSFile fileInfo = createGridFSFindIterable(clientSession, new Document("filename", filename)).skip(skip)
+        GridFSFile fileInfo = createGridFSFindIterable(clientSession, new Document("filename", filename), operationTimeout).skip(skip)
                 .sort(new Document("uploadDate", sort)).first();
         if (fileInfo == null) {
             throw new MongoGridFSException(format("No file found with the filename: %s and revision: %s", filename, revision));
@@ -542,25 +596,30 @@ private GridFSFile getFileByName(@Nullable final ClientSession clientSession, fi
         return fileInfo;
     }
 
-    private GridFSFile getFileInfoById(@Nullable final ClientSession clientSession, final BsonValue id) {
+    private GridFSFile getFileInfoById(@Nullable final ClientSession clientSession, final BsonValue id,
+                                       @Nullable final Timeout operationTImeout) {
         notNull("id", id);
-        GridFSFile fileInfo = createFindIterable(clientSession, new Document("_id", id)).first();
+        GridFSFile fileInfo = createFindIterable(clientSession, new Document("_id", id), operationTImeout).first();
         if (fileInfo == null) {
             throw new MongoGridFSException(format("No file found with the id: %s", id));
         }
         return fileInfo;
     }
 
-    private FindIterable<GridFSFile> createFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter) {
+    private FindIterable<GridFSFile> createFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter,
+                                                        @Nullable final Timeout operationTImeout) {
         FindIterable<GridFSFile> findIterable;
         if (clientSession != null) {
-            findIterable = filesCollection.find(clientSession);
+            findIterable = withNullableTimeout(filesCollection, operationTImeout).find(clientSession);
         } else {
-            findIterable = filesCollection.find();
+            findIterable = withNullableTimeout(filesCollection, operationTImeout).find();
         }
         if (filter != null) {
             findIterable = findIterable.filter(filter);
         }
+        if (filesCollection.getTimeout(MILLISECONDS) != null) {
+            findIterable.timeoutMode(TimeoutMode.CURSOR_LIFETIME);
+        }
         return findIterable;
     }
 
@@ -572,6 +631,8 @@ private void downloadToStream(final GridFSDownloadStream downloadStream, final O
             while ((len = downloadStream.read(buffer)) != -1) {
                 destination.write(buffer, 0, len);
             }
+        } catch (MongoOperationTimeoutException e){
+            throw e;
         } catch (IOException e) {
             savedThrowable = new MongoGridFSException("IOException when reading from the OutputStream", e);
         } catch (Exception e) {
@@ -587,4 +648,14 @@ private void downloadToStream(final GridFSDownloadStream downloadStream, final O
             }
         }
     }
+
+    private static <T> MongoCollection<T> withNullableTimeout(final MongoCollection<T> chunksCollection,
+                                                              @Nullable final Timeout timeout) {
+        return TimeoutHelper.collectionWithTimeout(chunksCollection, TIMEOUT_MESSAGE, timeout);
+    }
+
+    @Nullable
+    private Timeout startTimeout() {
+        return TimeoutContext.startTimeout(filesCollection.getTimeout(MILLISECONDS));
+    }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java
index c9f6607d144..709ae68138b 100644
--- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java
@@ -21,7 +21,10 @@
 import com.mongodb.client.FindIterable;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.MongoCursor;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.client.internal.TimeoutHelper;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinary;
 import org.bson.BsonDocument;
@@ -33,13 +36,18 @@
 import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.internal.Locks.withInterruptibleLock;
+import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException;
 import static java.lang.String.format;
 
 class GridFSDownloadStreamImpl extends GridFSDownloadStream {
+    private static final String TIMEOUT_MESSAGE = "The GridFS download stream exceeded the timeout limit.";
     private final ClientSession clientSession;
     private final GridFSFile fileInfo;
     private final MongoCollection<BsonDocument> chunksCollection;
     private final BsonValue fileId;
+    /**
+     * The length, in bytes of the file to download.
+     */
     private final long length;
     private final int chunkSizeInBytes;
     private final int numberOfChunks;
@@ -47,16 +55,20 @@ class GridFSDownloadStreamImpl extends GridFSDownloadStream {
     private int batchSize;
     private int chunkIndex;
     private int bufferOffset;
+    /**
+     * Current byte position in the file.
+     */
     private long currentPosition;
     private byte[] buffer = null;
     private long markPosition;
-
+    @Nullable
+    private final Timeout timeout;
     private final ReentrantLock closeLock = new ReentrantLock();
     private final ReentrantLock cursorLock = new ReentrantLock();
     private boolean closed = false;
 
     GridFSDownloadStreamImpl(@Nullable final ClientSession clientSession, final GridFSFile fileInfo,
-                             final MongoCollection<BsonDocument> chunksCollection) {
+                             final MongoCollection<BsonDocument> chunksCollection, @Nullable final Timeout timeout) {
         this.clientSession = clientSession;
         this.fileInfo = notNull("file information", fileInfo);
         this.chunksCollection = notNull("chunks collection",  chunksCollection);
@@ -65,6 +77,7 @@ class GridFSDownloadStreamImpl extends GridFSDownloadStream {
         length = fileInfo.getLength();
         chunkSizeInBytes = fileInfo.getChunkSize();
         numberOfChunks = (int) Math.ceil((double) length / chunkSizeInBytes);
+        this.timeout = timeout;
     }
 
     @Override
@@ -98,6 +111,7 @@ public int read(final byte[] b) {
     @Override
     public int read(final byte[] b, final int off, final int len) {
         checkClosed();
+        checkTimeout();
 
         if (currentPosition == length) {
             return -1;
@@ -119,6 +133,7 @@ public int read(final byte[] b, final int off, final int len) {
     @Override
     public long skip(final long bytesToSkip) {
         checkClosed();
+        checkTimeout();
         if (bytesToSkip <= 0) {
             return 0;
         }
@@ -147,6 +162,7 @@ public long skip(final long bytesToSkip) {
     @Override
     public int available() {
         checkClosed();
+        checkTimeout();
         if (buffer == null) {
             return 0;
         } else {
@@ -167,6 +183,7 @@ public void mark(final int readlimit) {
     @Override
     public void reset() {
         checkClosed();
+        checkTimeout();
         if (currentPosition == markPosition) {
             return;
         }
@@ -196,6 +213,11 @@ public void close() {
         });
     }
 
+    private void checkTimeout() {
+        Timeout.onExistsAndExpired(timeout, () -> {
+            throw createMongoTimeoutException(TIMEOUT_MESSAGE);
+        });
+    }
     private void checkClosed() {
         withInterruptibleLock(closeLock, () -> {
             if (closed) {
@@ -237,11 +259,15 @@ private MongoCursor<BsonDocument> getCursor(final int startChunkIndex) {
         FindIterable<BsonDocument> findIterable;
         BsonDocument filter = new BsonDocument("files_id", fileId).append("n", new BsonDocument("$gte", new BsonInt32(startChunkIndex)));
         if (clientSession != null) {
-            findIterable = chunksCollection.find(clientSession, filter);
+            findIterable = withNullableTimeout(chunksCollection, timeout).find(clientSession, filter);
         } else {
-            findIterable = chunksCollection.find(filter);
+            findIterable = withNullableTimeout(chunksCollection, timeout).find(filter);
         }
-        return findIterable.batchSize(batchSize).sort(new BsonDocument("n", new BsonInt32(1))).iterator();
+        if (timeout != null) {
+            findIterable.timeoutMode(TimeoutMode.CURSOR_LIFETIME);
+        }
+        return findIterable.batchSize(batchSize)
+                .sort(new BsonDocument("n", new BsonInt32(1))).iterator();
     }
 
     private byte[] getBufferFromChunk(@Nullable final BsonDocument chunk, final int expectedChunkIndex) {
@@ -280,4 +306,9 @@ private byte[] getBufferFromChunk(@Nullable final BsonDocument chunk, final int
     private byte[] getBuffer(final int chunkIndexToFetch) {
         return getBufferFromChunk(getChunk(chunkIndexToFetch), chunkIndexToFetch);
     }
+
+    private <T> MongoCollection<T> withNullableTimeout(final MongoCollection<T> chunksCollection,
+                                                       @Nullable final Timeout timeout) {
+        return TimeoutHelper.collectionWithTimeout(chunksCollection, TIMEOUT_MESSAGE, timeout);
+    }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java
index 26ef5f85934..240cecf78b3 100644
--- a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java
@@ -20,6 +20,9 @@
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.client.internal.TimeoutHelper;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinary;
 import org.bson.BsonDocument;
@@ -35,6 +38,7 @@
 import static com.mongodb.internal.Locks.withInterruptibleLock;
 
 final class GridFSUploadStreamImpl extends GridFSUploadStream {
+    private static final String TIMEOUT_MESSAGE = "The GridFS upload stream exceeded the timeout limit.";
     private final ClientSession clientSession;
     private final MongoCollection<GridFSFile> filesCollection;
     private final MongoCollection<BsonDocument> chunksCollection;
@@ -46,13 +50,14 @@ final class GridFSUploadStreamImpl extends GridFSUploadStream {
     private long lengthInBytes;
     private int bufferOffset;
     private int chunkIndex;
-
+    @Nullable
+    private final Timeout timeout;
     private final ReentrantLock closeLock = new ReentrantLock();
     private boolean closed = false;
 
     GridFSUploadStreamImpl(@Nullable final ClientSession clientSession, final MongoCollection<GridFSFile> filesCollection,
                            final MongoCollection<BsonDocument> chunksCollection, final BsonValue fileId, final String filename,
-                           final int chunkSizeBytes, @Nullable final Document metadata) {
+                           final int chunkSizeBytes, @Nullable final Document metadata, @Nullable final Timeout timeout) {
         this.clientSession = clientSession;
         this.filesCollection = notNull("files collection", filesCollection);
         this.chunksCollection = notNull("chunks collection", chunksCollection);
@@ -63,6 +68,7 @@ final class GridFSUploadStreamImpl extends GridFSUploadStream {
         chunkIndex = 0;
         bufferOffset = 0;
         buffer = new byte[chunkSizeBytes];
+        this.timeout = timeout;
     }
 
     @Override
@@ -86,9 +92,11 @@ public void abort() {
         });
 
         if (clientSession != null) {
-            chunksCollection.deleteMany(clientSession, new Document("files_id", fileId));
+            withNullableTimeout(chunksCollection, timeout)
+                    .deleteMany(clientSession, new Document("files_id", fileId));
         } else {
-            chunksCollection.deleteMany(new Document("files_id", fileId));
+            withNullableTimeout(chunksCollection, timeout)
+                    .deleteMany(new Document("files_id", fileId));
         }
     }
 
@@ -107,6 +115,7 @@ public void write(final byte[] b) {
     @Override
     public void write(final byte[] b, final int off, final int len) {
         checkClosed();
+        checkTimeout();
         notNull("b", b);
 
         if ((off < 0) || (off > b.length) || (len < 0)
@@ -138,6 +147,10 @@ public void write(final byte[] b, final int off, final int len) {
         }
     }
 
+    private void checkTimeout() {
+        Timeout.onExistsAndExpired(timeout, () -> TimeoutContext.throwMongoTimeoutException(TIMEOUT_MESSAGE));
+    }
+
     @Override
     public void close() {
         boolean alreadyClosed = withInterruptibleLock(closeLock, () -> {
@@ -152,9 +165,9 @@ public void close() {
         GridFSFile gridFSFile = new GridFSFile(fileId, filename, lengthInBytes, chunkSizeBytes, new Date(),
                 metadata);
         if (clientSession != null) {
-            filesCollection.insertOne(clientSession, gridFSFile);
+            withNullableTimeout(filesCollection, timeout).insertOne(clientSession, gridFSFile);
         } else {
-            filesCollection.insertOne(gridFSFile);
+            withNullableTimeout(filesCollection, timeout).insertOne(gridFSFile);
         }
         buffer = null;
     }
@@ -162,10 +175,15 @@ public void close() {
     private void writeChunk() {
         if (bufferOffset > 0) {
             if (clientSession != null) {
-                chunksCollection.insertOne(clientSession, new BsonDocument("files_id", fileId).append("n", new BsonInt32(chunkIndex))
-                        .append("data", getData()));
+                withNullableTimeout(chunksCollection, timeout)
+                        .insertOne(clientSession, new BsonDocument("files_id", fileId)
+                                .append("n", new BsonInt32(chunkIndex))
+                                .append("data", getData()));
             } else {
-                chunksCollection.insertOne(new BsonDocument("files_id", fileId).append("n", new BsonInt32(chunkIndex)).append("data", getData()));
+                withNullableTimeout(chunksCollection, timeout)
+                        .insertOne(new BsonDocument("files_id", fileId)
+                                .append("n", new BsonInt32(chunkIndex))
+                                .append("data", getData()));
             }
             chunkIndex++;
             bufferOffset = 0;
@@ -188,4 +206,9 @@ private void checkClosed() {
             }
         });
     }
+
+    private static <T> MongoCollection<T> withNullableTimeout(final MongoCollection<T> collection,
+                                                             @Nullable final Timeout timeout) {
+        return TimeoutHelper.collectionWithTimeout(collection, TIMEOUT_MESSAGE, timeout);
+    }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java
index 6559e029d4e..23c8fb35283 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java
@@ -23,7 +23,9 @@
 import com.mongodb.WriteConcern;
 import com.mongodb.client.AggregateIterable;
 import com.mongodb.client.ClientSession;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.client.model.AggregationLevel;
 import com.mongodb.internal.client.model.FindOptions;
 import com.mongodb.internal.operation.BatchCursor;
@@ -62,29 +64,25 @@ class AggregateIterableImpl<TDocument, TResult> extends MongoIterableImpl<TResul
     private String hintString;
     private Bson variables;
 
+    @SuppressWarnings("checkstyle:ParameterNumber")
     AggregateIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final Class<TDocument> documentClass,
-                          final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                          final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor,
-                          final List<? extends Bson> pipeline, final AggregationLevel aggregationLevel) {
+            final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
+            final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor,
+            final List<? extends Bson> pipeline, final AggregationLevel aggregationLevel, final boolean retryReads,
+            final TimeoutSettings timeoutSettings) {
         this(clientSession, new MongoNamespace(databaseName, "ignored"), documentClass, resultClass, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, aggregationLevel, true);
-    }
-
-    AggregateIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final Class<TDocument> documentClass,
-                          final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                          final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor,
-                          final List<? extends Bson> pipeline, final AggregationLevel aggregationLevel, final boolean retryReads) {
-        this(clientSession, new MongoNamespace(databaseName, "ignored"), documentClass, resultClass, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, aggregationLevel, retryReads);
+                readConcern, writeConcern, executor, pipeline, aggregationLevel, retryReads, timeoutSettings);
     }
 
+    @SuppressWarnings("checkstyle:ParameterNumber")
     AggregateIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TDocument> documentClass,
-                          final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                          final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor,
-                          final List<? extends Bson> pipeline, final AggregationLevel aggregationLevel, final boolean retryReads) {
-        super(clientSession, executor, readConcern, readPreference, retryReads);
+            final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
+            final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor,
+            final List<? extends Bson> pipeline, final AggregationLevel aggregationLevel, final boolean retryReads,
+            final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings);
         this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern,
-                true, retryReads);
+                true, retryReads, timeoutSettings);
         this.namespace = notNull("namespace", namespace);
         this.documentClass = notNull("documentClass", documentClass);
         this.resultClass = notNull("resultClass", resultClass);
@@ -100,8 +98,10 @@ public void toCollection() {
             throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge");
         }
 
-        getExecutor().execute(operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint,
-                hintString, comment, variables, aggregationLevel), getReadPreference(), getReadConcern(), getClientSession());
+        getExecutor().execute(
+                operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse,
+                        bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel),
+                getReadPreference(), getReadConcern(), getClientSession());
     }
 
     @Override
@@ -116,6 +116,12 @@ public AggregateIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public AggregateIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public AggregateIterable<TResult> maxTime(final long maxTime, final TimeUnit timeUnit) {
         notNull("timeUnit", timeUnit);
@@ -125,8 +131,7 @@ public AggregateIterable<TResult> maxTime(final long maxTime, final TimeUnit tim
 
     @Override
     public AggregateIterable<TResult> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit);
+        this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit);
         return this;
     }
 
@@ -194,16 +199,20 @@ public <E> E explain(final Class<E> explainResultClass, final ExplainVerbosity v
 
     private <E> E executeExplain(final Class<E> explainResultClass, @Nullable final ExplainVerbosity verbosity) {
         notNull("explainDocumentClass", explainResultClass);
-        return getExecutor().execute(asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)),
-                getReadPreference(), getReadConcern(), getClientSession());
+        return getExecutor().execute(
+                asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(),
+                getReadConcern(), getClientSession());
     }
 
     @Override
     public ReadOperation<BatchCursor<TResult>> asReadOperation() {
         MongoNamespace outNamespace = getOutNamespace();
         if (outNamespace != null) {
-            getExecutor().execute(operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation,
-                    hint, hintString, comment, variables, aggregationLevel), getReadPreference(), getReadConcern(), getClientSession());
+            validateTimeoutMode();
+            getExecutor().execute(
+                    operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse,
+                            bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel),
+                    getReadPreference(), getReadConcern(), getClientSession());
 
             FindOptions findOptions = new FindOptions().collation(collation);
             Integer batchSize = getBatchSize();
@@ -216,9 +225,13 @@ public ReadOperation<BatchCursor<TResult>> asReadOperation() {
         }
     }
 
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS));
+    }
+
     private ExplainableReadOperation<BatchCursor<TResult>> asAggregateOperation() {
-        return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, getBatchSize(), collation,
-                hint, hintString, comment, variables, allowDiskUse, aggregationLevel);
+        return operations.aggregate(pipeline, resultClass, getTimeoutMode(), getBatchSize(), collation, hint, hintString, comment,
+                variables, allowDiskUse, aggregationLevel);
     }
 
     @Nullable
@@ -269,4 +282,11 @@ private MongoNamespace getOutNamespace() {
 
         return null;
     }
+
+    private void validateTimeoutMode() {
+        if (getTimeoutMode() == TimeoutMode.ITERATION) {
+            throw new IllegalArgumentException("Aggregations that output to a collection do not support the ITERATION value for the "
+                    + "timeoutMode option.");
+        }
+    }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java
index d50b20cf0e9..4b7b3865569 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java
@@ -28,6 +28,7 @@
 import com.mongodb.client.model.changestream.ChangeStreamDocument;
 import com.mongodb.client.model.changestream.FullDocument;
 import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ReadOperation;
@@ -47,7 +48,6 @@
 import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.notNull;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 /**
  * <p>This class is not part of the public API and may be removed or changed at any time</p>
@@ -70,23 +70,23 @@ public class ChangeStreamIterableImpl<TResult> extends MongoIterableImpl<ChangeS
     private boolean showExpandedEvents;
 
     public ChangeStreamIterableImpl(@Nullable final ClientSession clientSession, final String databaseName,
-                                    final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern,
-                                    final OperationExecutor executor, final List<? extends Bson> pipeline, final Class<TResult> resultClass,
-                                    final ChangeStreamLevel changeStreamLevel, final boolean retryReads) {
+            final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern,
+            final OperationExecutor executor, final List<? extends Bson> pipeline, final Class<TResult> resultClass,
+            final ChangeStreamLevel changeStreamLevel, final boolean retryReads, final TimeoutSettings timeoutSettings) {
         this(clientSession, new MongoNamespace(databaseName, "ignored"), codecRegistry, readPreference, readConcern, executor, pipeline,
-                resultClass, changeStreamLevel, retryReads);
+                resultClass, changeStreamLevel, retryReads, timeoutSettings);
     }
 
     public ChangeStreamIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace,
                                     final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern,
                                     final OperationExecutor executor, final List<? extends Bson> pipeline, final Class<TResult> resultClass,
-                                    final ChangeStreamLevel changeStreamLevel, final boolean retryReads) {
-        super(clientSession, executor, readConcern, readPreference, retryReads);
+                                    final ChangeStreamLevel changeStreamLevel, final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings);
         this.codecRegistry = notNull("codecRegistry", codecRegistry);
         this.pipeline = notNull("pipeline", pipeline);
         this.codec = ChangeStreamDocument.createCodec(notNull("resultClass", resultClass), codecRegistry);
         this.changeStreamLevel = notNull("changeStreamLevel", changeStreamLevel);
-        this.operations = new SyncOperations<>(namespace, resultClass, readPreference, codecRegistry, retryReads);
+        this.operations = new SyncOperations<>(namespace, resultClass, readPreference, codecRegistry, retryReads, timeoutSettings);
     }
 
     @Override
@@ -115,8 +115,7 @@ public ChangeStreamIterable<TResult> batchSize(final int batchSize) {
 
     @Override
     public ChangeStreamIterable<TResult> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
-        this.maxAwaitTimeMS = MILLISECONDS.convert(maxAwaitTime, timeUnit);
+        this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit);
         return this;
     }
 
@@ -128,7 +127,8 @@ public ChangeStreamIterable<TResult> collation(@Nullable final Collation collati
 
     @Override
     public <TDocument> MongoIterable<TDocument> withDocumentClass(final Class<TDocument> clazz) {
-        return new MongoIterableImpl<TDocument>(getClientSession(), getExecutor(), getReadConcern(), getReadPreference(), getRetryReads()) {
+        return new MongoIterableImpl<TDocument>(getClientSession(), getExecutor(), getReadConcern(), getReadPreference(), getRetryReads(),
+                getTimeoutSettings()) {
             @Override
             public MongoCursor<TDocument> iterator() {
                 return cursor();
@@ -143,6 +143,12 @@ public MongoChangeStreamCursor<TDocument> cursor() {
             public ReadOperation<BatchCursor<TDocument>> asReadOperation() {
                 throw new UnsupportedOperationException();
             }
+
+            @Override
+
+            protected OperationExecutor getExecutor() {
+                return ChangeStreamIterableImpl.this.getExecutor();
+            }
         };
     }
 
@@ -203,9 +209,14 @@ public ReadOperation<BatchCursor<ChangeStreamDocument<TResult>>> asReadOperation
         throw new UnsupportedOperationException();
     }
 
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(0, maxAwaitTimeMS));
+    }
+
     private ReadOperation<BatchCursor<RawBsonDocument>> createChangeStreamOperation() {
         return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, new RawBsonDocumentCodec(), changeStreamLevel,
-                getBatchSize(), collation, comment, maxAwaitTimeMS, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
+                getBatchSize(), collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents);
     }
 
     private BatchCursor<RawBsonDocument> execute() {
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java
index fad1c711d64..3edef6b937d 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java
@@ -22,6 +22,7 @@
 import com.mongodb.MongoUpdatedEncryptedFieldsException;
 import com.mongodb.ReadConcern;
 import com.mongodb.WriteConcern;
+import com.mongodb.bulk.BulkWriteResult;
 import com.mongodb.client.FindIterable;
 import com.mongodb.client.MongoClient;
 import com.mongodb.client.MongoClients;
@@ -38,7 +39,10 @@
 import com.mongodb.client.model.vault.RewrapManyDataKeyResult;
 import com.mongodb.client.result.DeleteResult;
 import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.VisibleForTesting;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
 import org.bson.BsonArray;
 import org.bson.BsonBinary;
 import org.bson.BsonDocument;
@@ -54,11 +58,14 @@
 import java.util.stream.Collectors;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout;
+import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout;
 import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
 import static com.mongodb.internal.capi.MongoCryptHelper.validateRewrapManyDataKeyOptions;
 import static java.lang.String.format;
 import static java.util.Arrays.asList;
 import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.bson.internal.BsonUtil.mutableDeepCopy;
 
 /**
@@ -80,10 +87,22 @@ public ClientEncryptionImpl(final MongoClient keyVaultClient, final ClientEncryp
         this.crypt = Crypts.create(keyVaultClient, options);
         this.options = options;
         MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace());
-        this.collection = keyVaultClient.getDatabase(namespace.getDatabaseName())
+        this.collection = getVaultCollection(keyVaultClient, options, namespace);
+    }
+
+    private static MongoCollection<BsonDocument> getVaultCollection(final MongoClient keyVaultClient,
+                                                                    final ClientEncryptionSettings options,
+                                                                    final MongoNamespace namespace) {
+        MongoCollection<BsonDocument> vaultCollection = keyVaultClient.getDatabase(namespace.getDatabaseName())
                 .getCollection(namespace.getCollectionName(), BsonDocument.class)
                 .withWriteConcern(WriteConcern.MAJORITY)
                 .withReadConcern(ReadConcern.MAJORITY);
+
+        Long timeoutMs = options.getTimeout(MILLISECONDS);
+        if (timeoutMs != null) {
+            vaultCollection = vaultCollection.withTimeout(timeoutMs, MILLISECONDS);
+        }
+        return vaultCollection;
     }
 
     @Override
@@ -93,39 +112,48 @@ public BsonBinary createDataKey(final String kmsProvider) {
 
     @Override
     public BsonBinary createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions) {
-        BsonDocument dataKeyDocument = crypt.createDataKey(kmsProvider, dataKeyOptions);
-        collection.insertOne(dataKeyDocument);
+        Timeout operationTimeout = startTimeout();
+        return createDataKey(kmsProvider, dataKeyOptions, operationTimeout);
+    }
+
+    public BsonBinary createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions, @Nullable final Timeout operationTimeout) {
+        BsonDocument dataKeyDocument = crypt.createDataKey(kmsProvider, dataKeyOptions, operationTimeout);
+        collectionWithTimeout(collection, "Data key insertion exceeded the timeout limit.", operationTimeout).insertOne(dataKeyDocument);
         return dataKeyDocument.getBinary("_id");
     }
 
     @Override
     public BsonBinary encrypt(final BsonValue value, final EncryptOptions options) {
-        return crypt.encryptExplicitly(value, options);
+        Timeout operationTimeout = startTimeout();
+        return crypt.encryptExplicitly(value, options, operationTimeout);
     }
 
     @Override
     public BsonDocument encryptExpression(final Bson expression, final EncryptOptions options) {
-        return crypt.encryptExpression(expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options);
+        Timeout operationTimeout = startTimeout();
+        return crypt.encryptExpression(expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options,
+                operationTimeout);
     }
 
     @Override
     public BsonValue decrypt(final BsonBinary value) {
-        return crypt.decryptExplicitly(value);
+        Timeout operationTimeout = startTimeout();
+        return crypt.decryptExplicitly(value, operationTimeout);
     }
 
     @Override
     public DeleteResult deleteKey(final BsonBinary id) {
-        return collection.deleteOne(Filters.eq("_id", id));
+        return collectionWithTimeout(collection, startTimeout()).deleteOne(Filters.eq("_id", id));
     }
 
     @Override
     public BsonDocument getKey(final BsonBinary id) {
-        return collection.find(Filters.eq("_id", id)).first();
+        return collectionWithTimeout(collection, startTimeout()).find(Filters.eq("_id", id)).first();
     }
 
     @Override
     public FindIterable<BsonDocument> getKeys() {
-        return collection.find();
+        return collectionWithTimeout(collection, startTimeout()).find();
     }
 
     @Override
@@ -170,7 +198,9 @@ public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter) {
     @Override
     public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter, final RewrapManyDataKeyOptions options) {
         validateRewrapManyDataKeyOptions(options);
-        BsonDocument results = crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options);
+        Timeout operationTimeout = startTimeout();
+        BsonDocument results = crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()),
+                options, operationTimeout);
         if (results.isEmpty()) {
             return new RewrapManyDataKeyResult();
         }
@@ -183,7 +213,8 @@ public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter, final Rewrap
                                     Updates.currentDate("updateDate"))
                     );
         }).collect(Collectors.toList());
-        return new RewrapManyDataKeyResult(collection.bulkWrite(updateModels));
+        BulkWriteResult bulkWriteResult = collectionWithTimeout(collection, operationTimeout).bulkWrite(updateModels);
+        return new RewrapManyDataKeyResult(bulkWriteResult);
     }
 
     @Override
@@ -192,6 +223,7 @@ public BsonDocument createEncryptedCollection(final MongoDatabase database, fina
         notNull("collectionName", collectionName);
         notNull("createCollectionOptions", createCollectionOptions);
         notNull("createEncryptedCollectionParams", createEncryptedCollectionParams);
+        Timeout operationTimeout = startTimeout();
         MongoNamespace namespace = new MongoNamespace(database.getName(), collectionName);
         Bson rawEncryptedFields = createCollectionOptions.getEncryptedFields();
         if (rawEncryptedFields == null) {
@@ -222,10 +254,10 @@ public BsonDocument createEncryptedCollection(final MongoDatabase database, fina
                             // It is crucial to set the `dataKeyMightBeCreated` flag either immediately before calling `createDataKey`,
                             // or after that in a `finally` block.
                             dataKeyMightBeCreated.set(true);
-                            BsonBinary dataKeyId = createDataKey(kmsProvider, dataKeyOptions);
+                            BsonBinary dataKeyId = createDataKey(kmsProvider, dataKeyOptions, operationTimeout);
                             field.put(keyIdBsonKey, dataKeyId);
                         });
-                database.createCollection(collectionName,
+                databaseWithTimeout(database, operationTimeout).createCollection(collectionName,
                         new CreateCollectionOptions(createCollectionOptions).encryptedFields(maybeUpdatedEncryptedFields));
                 return maybeUpdatedEncryptedFields;
             } catch (Exception e) {
@@ -236,7 +268,7 @@ public BsonDocument createEncryptedCollection(final MongoDatabase database, fina
                 }
             }
         } else {
-            database.createCollection(collectionName, createCollectionOptions);
+            databaseWithTimeout(database, operationTimeout).createCollection(collectionName, createCollectionOptions);
             return encryptedFields;
         }
     }
@@ -246,4 +278,9 @@ public void close() {
         crypt.close();
         keyVaultClient.close();
     }
+
+    @Nullable
+    private Timeout startTimeout() {
+        return TimeoutContext.startTimeout(options.getTimeout(MILLISECONDS));
+    }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java
index a265ca01a7d..2d8a4dbfb30 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java
@@ -18,11 +18,8 @@
 
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
 import com.mongodb.client.ClientSession;
 import com.mongodb.connection.ClusterType;
-import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.binding.AbstractReferenceCounted;
 import com.mongodb.internal.binding.ClusterAwareReadWriteBinding;
@@ -30,9 +27,8 @@
 import com.mongodb.internal.binding.ReadWriteBinding;
 import com.mongodb.internal.binding.TransactionContext;
 import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.session.ClientSessionContext;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
 
 import java.util.function.Supplier;
 
@@ -48,14 +44,14 @@ public class ClientSessionBinding extends AbstractReferenceCounted implements Re
     private final ClusterAwareReadWriteBinding wrapped;
     private final ClientSession session;
     private final boolean ownsSession;
-    private final ClientSessionContext sessionContext;
+    private final OperationContext operationContext;
 
     public ClientSessionBinding(final ClientSession session, final boolean ownsSession, final ClusterAwareReadWriteBinding wrapped) {
         this.wrapped = wrapped;
         wrapped.retain();
         this.session = notNull("session", session);
         this.ownsSession = ownsSession;
-        this.sessionContext = new SyncClientSessionContext(session);
+        this.operationContext = wrapped.getOperationContext().withSessionContext(new SyncClientSessionContext(session));
     }
 
     @Override
@@ -102,25 +98,9 @@ public ConnectionSource getWriteConnectionSource() {
         return new SessionBindingConnectionSource(getConnectionSource(wrapped::getWriteConnectionSource));
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return sessionContext;
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return wrapped.getServerApi();
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return wrapped.getRequestContext();
-    }
-
     @Override
     public OperationContext getOperationContext() {
-        return wrapped.getOperationContext();
+        return operationContext;
     }
 
     private ConnectionSource getConnectionSource(final Supplier<ConnectionSource> wrappedConnectionSourceSupplier) {
@@ -155,24 +135,9 @@ public ServerDescription getServerDescription() {
             return wrapped.getServerDescription();
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return sessionContext;
-        }
-
         @Override
         public OperationContext getOperationContext() {
-            return wrapped.getOperationContext();
-        }
-
-        @Override
-        public ServerApi getServerApi() {
-            return wrapped.getServerApi();
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return wrapped.getRequestContext();
+            return operationContext;
         }
 
         @Override
@@ -250,7 +215,7 @@ public ReadConcern getReadConcern() {
             } else if (isSnapshot()) {
                 return ReadConcern.SNAPSHOT;
             } else {
-               return wrapped.getSessionContext().getReadConcern();
+               return wrapped.getOperationContext().getSessionContext().getReadConcern();
             }
         }
     }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java
index 4a6afe4101b..d3bbd850ae0 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java
@@ -21,17 +21,21 @@
 import com.mongodb.MongoException;
 import com.mongodb.MongoExecutionTimeoutException;
 import com.mongodb.MongoInternalException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.ReadConcern;
 import com.mongodb.TransactionOptions;
 import com.mongodb.WriteConcern;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.TransactionBody;
+import com.mongodb.internal.TimeoutContext;
 import com.mongodb.internal.operation.AbortTransactionOperation;
 import com.mongodb.internal.operation.CommitTransactionOperation;
 import com.mongodb.internal.operation.ReadOperation;
+import com.mongodb.internal.operation.WriteConcernHelper;
 import com.mongodb.internal.operation.WriteOperation;
 import com.mongodb.internal.session.BaseClientSessionImpl;
 import com.mongodb.internal.session.ServerSessionPool;
+import com.mongodb.lang.Nullable;
 
 import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL;
 import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL;
@@ -39,26 +43,21 @@
 import static com.mongodb.assertions.Assertions.assertTrue;
 import static com.mongodb.assertions.Assertions.isTrue;
 import static com.mongodb.assertions.Assertions.notNull;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
 
 final class ClientSessionImpl extends BaseClientSessionImpl implements ClientSession {
 
-    private enum TransactionState {
-        NONE, IN, COMMITTED, ABORTED
-    }
-
     private static final int MAX_RETRY_TIME_LIMIT_MS = 120000;
 
-    private final MongoClientDelegate delegate;
+    private final OperationExecutor operationExecutor;
     private TransactionState transactionState = TransactionState.NONE;
     private boolean messageSentInCurrentTransaction;
     private boolean commitInProgress;
     private TransactionOptions transactionOptions;
 
     ClientSessionImpl(final ServerSessionPool serverSessionPool, final Object originator, final ClientSessionOptions options,
-                      final MongoClientDelegate delegate) {
+                      final OperationExecutor operationExecutor) {
         super(serverSessionPool, originator, options);
-        this.delegate = delegate;
+        this.operationExecutor = operationExecutor;
     }
 
     @Override
@@ -104,6 +103,47 @@ public void startTransaction() {
 
     @Override
     public void startTransaction(final TransactionOptions transactionOptions) {
+        startTransaction(transactionOptions, createTimeoutContext(transactionOptions));
+    }
+
+    @Override
+    public void commitTransaction() {
+        commitTransaction(true);
+    }
+
+    @Override
+    public void abortTransaction() {
+        if (transactionState == TransactionState.ABORTED) {
+            throw new IllegalStateException("Cannot call abortTransaction twice");
+        }
+        if (transactionState == TransactionState.COMMITTED) {
+            throw new IllegalStateException("Cannot call abortTransaction after calling commitTransaction");
+        }
+        if (transactionState == TransactionState.NONE) {
+            throw new IllegalStateException("There is no transaction started");
+        }
+        try {
+            if (messageSentInCurrentTransaction) {
+                ReadConcern readConcern = transactionOptions.getReadConcern();
+                if (readConcern == null) {
+                    throw new MongoInternalException("Invariant violated.  Transaction options read concern can not be null");
+                }
+                resetTimeout();
+                TimeoutContext timeoutContext = getTimeoutContext();
+                WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext));
+                operationExecutor
+                        .execute(new AbortTransactionOperation(writeConcern)
+                                .recoveryToken(getRecoveryToken()), readConcern, this);
+            }
+        } catch (RuntimeException e) {
+            // ignore exceptions from abort
+        } finally {
+            clearTransactionContext();
+            cleanupTransaction(TransactionState.ABORTED);
+        }
+    }
+
+    private void startTransaction(final TransactionOptions transactionOptions, final TimeoutContext timeoutContext) {
         Boolean snapshot = getOptions().isSnapshot();
         if (snapshot != null && snapshot) {
             throw new IllegalArgumentException("Transactions are not supported in snapshot sessions");
@@ -119,7 +159,7 @@ public void startTransaction(final TransactionOptions transactionOptions) {
         }
         getServerSession().advanceTransactionNumber();
         this.transactionOptions = TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions());
-        WriteConcern writeConcern = this.transactionOptions.getWriteConcern();
+        WriteConcern writeConcern = getWriteConcern(timeoutContext);
         if (writeConcern == null) {
             throw new MongoInternalException("Invariant violated.  Transaction options write concern can not be null");
         }
@@ -127,10 +167,19 @@ public void startTransaction(final TransactionOptions transactionOptions) {
             throw new MongoClientException("Transactions do not support unacknowledged write concern");
         }
         clearTransactionContext();
+        setTimeoutContext(timeoutContext);
     }
 
-    @Override
-    public void commitTransaction() {
+    @Nullable
+    private WriteConcern getWriteConcern(@Nullable final TimeoutContext timeoutContext) {
+        WriteConcern writeConcern = transactionOptions.getWriteConcern();
+        if (hasTimeoutMS(timeoutContext) && hasWTimeoutMS(writeConcern)) {
+            return WriteConcernHelper.cloneWithoutTimeout(writeConcern);
+        }
+        return writeConcern;
+    }
+
+    private void commitTransaction(final boolean resetTimeout) {
         if (transactionState == TransactionState.ABORTED) {
             throw new IllegalStateException("Cannot call commitTransaction after calling abortTransaction");
         }
@@ -145,11 +194,15 @@ public void commitTransaction() {
                     throw new MongoInternalException("Invariant violated.  Transaction options read concern can not be null");
                 }
                 commitInProgress = true;
-                delegate.getOperationExecutor().execute(new CommitTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()),
-                        transactionState == TransactionState.COMMITTED)
-                                .recoveryToken(getRecoveryToken())
-                                .maxCommitTime(transactionOptions.getMaxCommitTime(MILLISECONDS), MILLISECONDS),
-                        readConcern, this);
+                if (resetTimeout) {
+                    resetTimeout();
+                }
+                TimeoutContext timeoutContext = getTimeoutContext();
+                WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext));
+                operationExecutor
+                        .execute(new CommitTransactionOperation(writeConcern,
+                                transactionState == TransactionState.COMMITTED)
+                                .recoveryToken(getRecoveryToken()), readConcern, this);
             }
         } catch (MongoException e) {
             clearTransactionContextOnError(e);
@@ -160,35 +213,6 @@ public void commitTransaction() {
         }
     }
 
-    @Override
-    public void abortTransaction() {
-        if (transactionState == TransactionState.ABORTED) {
-            throw new IllegalStateException("Cannot call abortTransaction twice");
-        }
-        if (transactionState == TransactionState.COMMITTED) {
-            throw new IllegalStateException("Cannot call abortTransaction after calling commitTransaction");
-        }
-        if (transactionState == TransactionState.NONE) {
-            throw new IllegalStateException("There is no transaction started");
-        }
-        try {
-            if (messageSentInCurrentTransaction) {
-                ReadConcern readConcern = transactionOptions.getReadConcern();
-                if (readConcern == null) {
-                    throw new MongoInternalException("Invariant violated.  Transaction options read concern can not be null");
-                }
-                delegate.getOperationExecutor().execute(new AbortTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()))
-                                .recoveryToken(getRecoveryToken()),
-                        readConcern, this);
-            }
-        } catch (RuntimeException e) {
-            // ignore exceptions from abort
-        } finally {
-            clearTransactionContext();
-            cleanupTransaction(TransactionState.ABORTED);
-        }
-    }
-
     private void clearTransactionContextOnError(final MongoException e) {
         if (e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) || e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) {
             clearTransactionContext();
@@ -204,17 +228,19 @@ public <T> T withTransaction(final TransactionBody<T> transactionBody) {
     public <T> T withTransaction(final TransactionBody<T> transactionBody, final TransactionOptions options) {
         notNull("transactionBody", transactionBody);
         long startTime = ClientSessionClock.INSTANCE.now();
+        TimeoutContext withTransactionTimeoutContext = createTimeoutContext(options);
+
         outer:
         while (true) {
             T retVal;
             try {
-                startTransaction(options);
+                startTransaction(options, withTransactionTimeoutContext.copyTimeoutContext());
                 retVal = transactionBody.execute();
             } catch (Throwable e) {
                 if (transactionState == TransactionState.IN) {
                     abortTransaction();
                 }
-                if (e instanceof MongoException) {
+                if (e instanceof MongoException && !(e instanceof MongoOperationTimeoutException)) {
                     if (((MongoException) e).hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)
                             && ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) {
                         continue;
@@ -225,11 +251,12 @@ public <T> T withTransaction(final TransactionBody<T> transactionBody, final Tra
             if (transactionState == TransactionState.IN) {
                 while (true) {
                     try {
-                        commitTransaction();
+                        commitTransaction(false);
                         break;
                     } catch (MongoException e) {
                         clearTransactionContextOnError(e);
-                        if (ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) {
+                        if (!(e instanceof MongoOperationTimeoutException)
+                                && ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) {
                             applyMajorityWriteConcernToTransactionOptions();
 
                             if (!(e instanceof MongoExecutionTimeoutException)
@@ -247,10 +274,23 @@ public <T> T withTransaction(final TransactionBody<T> transactionBody, final Tra
         }
     }
 
+    @Override
+    public void close() {
+        try {
+            if (transactionState == TransactionState.IN) {
+                abortTransaction();
+            }
+        } finally {
+            clearTransactionContext();
+            super.close();
+        }
+    }
+
     // Apply majority write concern if the commit is to be retried.
     private void applyMajorityWriteConcernToTransactionOptions() {
         if (transactionOptions != null) {
-            WriteConcern writeConcern = transactionOptions.getWriteConcern();
+            TimeoutContext timeoutContext = getTimeoutContext();
+            WriteConcern writeConcern = getWriteConcern(timeoutContext);
             if (writeConcern != null) {
                 transactionOptions = TransactionOptions.merge(TransactionOptions.builder()
                         .writeConcern(writeConcern.withW("majority")).build(), transactionOptions);
@@ -263,21 +303,16 @@ private void applyMajorityWriteConcernToTransactionOptions() {
         }
     }
 
-    @Override
-    public void close() {
-        try {
-            if (transactionState == TransactionState.IN) {
-                abortTransaction();
-            }
-        } finally {
-            clearTransactionContext();
-            super.close();
-        }
-    }
-
     private void cleanupTransaction(final TransactionState nextState) {
         messageSentInCurrentTransaction = false;
         transactionOptions = null;
         transactionState = nextState;
+        setTimeoutContext(null);
+    }
+
+    private TimeoutContext createTimeoutContext(final TransactionOptions transactionOptions) {
+        return new TimeoutContext(getTimeoutSettings(
+                TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions()),
+                operationExecutor.getTimeoutSettings()));
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java b/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java
index 6098aef53b8..934a3dce486 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java
@@ -17,13 +17,16 @@
 package com.mongodb.client.internal;
 
 import com.mongodb.client.MongoClient;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout;
 
 class CollectionInfoRetriever {
 
+    private static final String TIMEOUT_ERROR_MESSAGE = "Collection information retrieval exceeded the timeout limit.";
     private final MongoClient client;
 
     CollectionInfoRetriever(final MongoClient client) {
@@ -31,7 +34,8 @@ class CollectionInfoRetriever {
     }
 
     @Nullable
-    public BsonDocument filter(final String databaseName, final BsonDocument filter) {
-        return client.getDatabase(databaseName).listCollections(BsonDocument.class).filter(filter).first();
+    public BsonDocument filter(final String databaseName, final BsonDocument filter, @Nullable final Timeout operationTimeout) {
+        return databaseWithTimeout(client.getDatabase(databaseName), TIMEOUT_ERROR_MESSAGE,
+                operationTimeout).listCollections(BsonDocument.class).filter(filter).first();
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java
index 05cfc9462d6..9e2d7b3889b 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java
@@ -19,12 +19,15 @@
 import com.mongodb.AutoEncryptionSettings;
 import com.mongodb.MongoClientException;
 import com.mongodb.MongoException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoTimeoutException;
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.client.MongoClient;
 import com.mongodb.client.MongoClients;
+import com.mongodb.client.MongoDatabase;
 import com.mongodb.crypt.capi.MongoCrypt;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.RawBsonDocument;
 
@@ -32,6 +35,7 @@
 import java.util.Map;
 
 import static com.mongodb.assertions.Assertions.assertNotNull;
+import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout;
 import static com.mongodb.internal.capi.MongoCryptHelper.createMongocryptdClientSettings;
 import static com.mongodb.internal.capi.MongoCryptHelper.createProcessBuilder;
 import static com.mongodb.internal.capi.MongoCryptHelper.isMongocryptdSpawningDisabled;
@@ -39,6 +43,7 @@
 
 @SuppressWarnings("UseOfProcessBuilder")
 class CommandMarker implements Closeable {
+    private static final String TIMEOUT_ERROR_MESSAGE = "Command marker exceeded the timeout limit.";
     @Nullable
     private final MongoClient client;
     @Nullable
@@ -58,7 +63,6 @@ class CommandMarker implements Closeable {
      *  <li>The extraOptions.cryptSharedLibRequired option is false.</li>
      * </ul>
      *  Then mongocryptd MUST be spawned by the driver.
-     * </p>
      */
     CommandMarker(
             final MongoCrypt mongoCrypt,
@@ -80,17 +84,19 @@ class CommandMarker implements Closeable {
         }
     }
 
-    RawBsonDocument mark(final String databaseName, final RawBsonDocument command) {
+    RawBsonDocument mark(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) {
         if (client != null) {
             try {
                 try {
-                    return executeCommand(databaseName, command);
+                    return executeCommand(databaseName, command, operationTimeout);
+                } catch (MongoOperationTimeoutException e) {
+                    throw e;
                 } catch (MongoTimeoutException e) {
                     if (processBuilder == null) {  // mongocryptdBypassSpawn=true
                         throw e;
                     }
                     startProcess(processBuilder);
-                    return executeCommand(databaseName, command);
+                    return executeCommand(databaseName, command, operationTimeout);
                 }
             } catch (MongoException e) {
                 throw wrapInClientException(e);
@@ -107,11 +113,14 @@ public void close() {
         }
     }
 
-    private RawBsonDocument executeCommand(final String databaseName, final RawBsonDocument markableCommand) {
+    private RawBsonDocument executeCommand(final String databaseName, final RawBsonDocument markableCommand, @Nullable final Timeout operationTimeout) {
         assertNotNull(client);
-        return client.getDatabase(databaseName)
+
+        MongoDatabase mongoDatabase = client.getDatabase(databaseName)
                 .withReadConcern(ReadConcern.DEFAULT)
-                .withReadPreference(ReadPreference.primary())
+                .withReadPreference(ReadPreference.primary());
+
+        return databaseWithTimeout(mongoDatabase, TIMEOUT_ERROR_MESSAGE, operationTimeout)
                 .runCommand(markableCommand, RawBsonDocument.class);
     }
 
diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java
index 792061d7748..53a65ceaa02 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java
@@ -20,6 +20,7 @@
 import com.mongodb.MongoException;
 import com.mongodb.MongoInternalException;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.MongoClient;
 import com.mongodb.client.model.vault.DataKeyOptions;
 import com.mongodb.client.model.vault.EncryptOptions;
@@ -31,6 +32,7 @@
 import com.mongodb.crypt.capi.MongoKeyDecryptor;
 import com.mongodb.crypt.capi.MongoRewrapManyDataKeyOptions;
 import com.mongodb.internal.capi.MongoCryptHelper;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinary;
 import org.bson.BsonDocument;
@@ -131,7 +133,7 @@ public class Crypt implements Closeable {
      * @param command   the unencrypted command
      * @return the encrypted command
      */
-    RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command) {
+    RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) {
         notNull("databaseName", databaseName);
         notNull("command", command);
 
@@ -140,7 +142,7 @@ RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command
         }
 
        try (MongoCryptContext encryptionContext = mongoCrypt.createEncryptionContext(databaseName, command)) {
-           return executeStateMachine(encryptionContext, databaseName);
+           return executeStateMachine(encryptionContext, databaseName, operationTimeout);
         } catch (MongoCryptException e) {
             throw wrapInMongoException(e);
         }
@@ -152,10 +154,10 @@ RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command
      * @param commandResponse the encrypted command response
      * @return the decrypted command response
      */
-    RawBsonDocument decrypt(final RawBsonDocument commandResponse) {
+    RawBsonDocument decrypt(final RawBsonDocument commandResponse, @Nullable final Timeout operationTimeout) {
         notNull("commandResponse", commandResponse);
         try (MongoCryptContext decryptionContext = mongoCrypt.createDecryptionContext(commandResponse)) {
-            return executeStateMachine(decryptionContext, null);
+            return executeStateMachine(decryptionContext, null, operationTimeout);
         } catch (MongoCryptException e) {
             throw wrapInMongoException(e);
         }
@@ -168,7 +170,7 @@ RawBsonDocument decrypt(final RawBsonDocument commandResponse) {
      * @param options     the data key options
      * @return the document representing the data key to be added to the key vault
      */
-    BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions options) {
+    BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions options, @Nullable final Timeout operationTimeout) {
         notNull("kmsProvider", kmsProvider);
         notNull("options", options);
 
@@ -178,7 +180,7 @@ BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions option
                         .masterKey(options.getMasterKey())
                         .keyMaterial(options.getKeyMaterial())
                         .build())) {
-            return executeStateMachine(dataKeyCreationContext, null);
+            return executeStateMachine(dataKeyCreationContext, null, operationTimeout);
         } catch (MongoCryptException e) {
             throw wrapInMongoException(e);
         }
@@ -191,13 +193,13 @@ BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions option
      * @param options the options
      * @return the encrypted value
      */
-    BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options) {
+    BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options, @Nullable final Timeout operationTimeout) {
         notNull("value", value);
         notNull("options", options);
 
         try (MongoCryptContext encryptionContext = mongoCrypt.createExplicitEncryptionContext(
                 new BsonDocument("v", value), asMongoExplicitEncryptOptions(options))) {
-            return executeStateMachine(encryptionContext, null).getBinary("v");
+            return executeStateMachine(encryptionContext, null, operationTimeout).getBinary("v");
         } catch (MongoCryptException e) {
             throw wrapInMongoException(e);
         }
@@ -210,14 +212,14 @@ BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options
      * @param options    the options
      * @return the encrypted expression
      */
-    @Beta(Beta.Reason.SERVER)
-    BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptions options) {
+    @Beta(Reason.SERVER)
+    BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout operationTimeout) {
         notNull("expression", expression);
         notNull("options", options);
 
         try (MongoCryptContext encryptionContext = mongoCrypt.createEncryptExpressionContext(
                 new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options))) {
-            return executeStateMachine(encryptionContext, null).getDocument("v");
+            return executeStateMachine(encryptionContext, null, operationTimeout).getDocument("v");
         } catch (MongoCryptException e) {
             throw wrapInMongoException(e);
         }
@@ -229,10 +231,10 @@ BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptio
      * @param value the encrypted value
      * @return the decrypted value
      */
-    BsonValue decryptExplicitly(final BsonBinary value) {
+    BsonValue decryptExplicitly(final BsonBinary value, @Nullable final Timeout operationTimeout) {
         notNull("value", value);
         try (MongoCryptContext decryptionContext = mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value))) {
-            return assertNotNull(executeStateMachine(decryptionContext, null, operationTimeout).get("v"));
         } catch (MongoCryptException e) {
             throw wrapInMongoException(e);
         }
@@ -245,7 +247,7 @@ BsonValue decryptExplicitly(final BsonBinary value) {
      * @return the decrypted value
      * @since 4.7
      */
-    BsonDocument rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options) {
+    BsonDocument rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options, @Nullable final Timeout operationTimeout) {
         notNull("filter", filter);
         try {
             try (MongoCryptContext rewrapManyDatakeyContext = mongoCrypt.createRewrapManyDatakeyContext(filter,
@@ -254,7 +256,7 @@ BsonDocument rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKe
                             .provider(options.getProvider())
                             .masterKey(options.getMasterKey())
                             .build())) {
-                return executeStateMachine(rewrapManyDatakeyContext, null);
+                return executeStateMachine(rewrapManyDatakeyContext, null, operationTimeout);
             }
         } catch (MongoCryptException e) {
             throw wrapInMongoException(e);
@@ -274,24 +276,24 @@ public void close() {
         }
     }
 
-    private RawBsonDocument executeStateMachine(final MongoCryptContext cryptContext, @Nullable final String databaseName) {
+    private RawBsonDocument executeStateMachine(final MongoCryptContext cryptContext, @Nullable final String databaseName, @Nullable final Timeout operationTimeout) {
         while (true) {
             State state = cryptContext.getState();
             switch (state) {
                 case NEED_MONGO_COLLINFO:
-                    collInfo(cryptContext, notNull("databaseName", databaseName));
+                    collInfo(cryptContext, notNull("databaseName", databaseName), operationTimeout);
                     break;
                 case NEED_MONGO_MARKINGS:
-                    mark(cryptContext, notNull("databaseName", databaseName));
+                    mark(cryptContext, notNull("databaseName", databaseName), operationTimeout);
                     break;
                 case NEED_KMS_CREDENTIALS:
                     fetchCredentials(cryptContext);
                     break;
                 case NEED_MONGO_KEYS:
-                    fetchKeys(cryptContext);
+                    fetchKeys(cryptContext, operationTimeout);
                     break;
                 case NEED_KMS:
-                    decryptKeys(cryptContext);
+                    decryptKeys(cryptContext, operationTimeout);
                     break;
                 case READY:
                     return cryptContext.finish();
@@ -307,9 +309,9 @@ private void fetchCredentials(final MongoCryptContext cryptContext) {
         cryptContext.provideKmsProviderCredentials(MongoCryptHelper.fetchCredentials(kmsProviders, kmsProviderPropertySuppliers));
     }
 
-    private void collInfo(final MongoCryptContext cryptContext, final String databaseName) {
+    private void collInfo(final MongoCryptContext cryptContext, final String databaseName, @Nullable final Timeout operationTimeout) {
         try {
-            BsonDocument collectionInfo = assertNotNull(collectionInfoRetriever).filter(databaseName, cryptContext.getMongoOperation());
+            BsonDocument collectionInfo = assertNotNull(collectionInfoRetriever).filter(databaseName, cryptContext.getMongoOperation(), operationTimeout);
             if (collectionInfo != null) {
                 cryptContext.addMongoOperationResult(collectionInfo);
             }
@@ -319,9 +321,9 @@ private void collInfo(final MongoCryptContext cryptContext, final String databas
         }
     }
 
-    private void mark(final MongoCryptContext cryptContext, final String databaseName) {
+    private void mark(final MongoCryptContext cryptContext, final String databaseName, @Nullable final Timeout operationTimeout) {
         try {
-            RawBsonDocument markedCommand = assertNotNull(commandMarker).mark(databaseName, cryptContext.getMongoOperation());
+            RawBsonDocument markedCommand = assertNotNull(commandMarker).mark(databaseName, cryptContext.getMongoOperation(), operationTimeout);
             cryptContext.addMongoOperationResult(markedCommand);
             cryptContext.completeMongoOperation();
         } catch (Throwable t) {
@@ -329,9 +331,9 @@ private void mark(final MongoCryptContext cryptContext, final String databaseNam
         }
     }
 
-    private void fetchKeys(final MongoCryptContext keyBroker) {
+    private void fetchKeys(final MongoCryptContext keyBroker, @Nullable final Timeout operationTimeout) {
         try {
-            for (BsonDocument bsonDocument : keyRetriever.find(keyBroker.getMongoOperation())) {
+            for (BsonDocument bsonDocument : keyRetriever.find(keyBroker.getMongoOperation(), operationTimeout)) {
                 keyBroker.addMongoOperationResult(bsonDocument);
             }
             keyBroker.completeMongoOperation();
@@ -340,11 +342,11 @@ private void fetchKeys(final MongoCryptContext keyBroker) {
         }
     }
 
-    private void decryptKeys(final MongoCryptContext cryptContext) {
+    private void decryptKeys(final MongoCryptContext cryptContext, @Nullable final Timeout operationTimeout) {
         try {
             MongoKeyDecryptor keyDecryptor = cryptContext.nextKeyDecryptor();
             while (keyDecryptor != null) {
-                decryptKey(keyDecryptor);
+                decryptKey(keyDecryptor, operationTimeout);
                 keyDecryptor = cryptContext.nextKeyDecryptor();
             }
             cryptContext.completeKeyDecryptors();
@@ -354,9 +356,9 @@ private void decryptKeys(final MongoCryptContext cryptContext) {
         }
     }
 
-    private void decryptKey(final MongoKeyDecryptor keyDecryptor) throws IOException {
+    private void decryptKey(final MongoKeyDecryptor keyDecryptor, @Nullable final Timeout operationTimeout) throws IOException {
         try (InputStream inputStream = keyManagementService.stream(keyDecryptor.getKmsProvider(), keyDecryptor.getHostName(),
-                keyDecryptor.getMessage())) {
+                keyDecryptor.getMessage(), operationTimeout)) {
             int bytesNeeded = keyDecryptor.bytesNeeded();
 
             while (bytesNeeded > 0) {
diff --git a/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java b/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java
index ab195a46dd5..036466077ec 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java
@@ -17,17 +17,14 @@
 package com.mongodb.client.internal;
 
 import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
 import com.mongodb.ServerAddress;
-import com.mongodb.ServerApi;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.internal.binding.ClusterAwareReadWriteBinding;
 import com.mongodb.internal.binding.ConnectionSource;
 import com.mongodb.internal.binding.ReadWriteBinding;
 import com.mongodb.internal.connection.Connection;
 import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.session.SessionContext;
-import com.mongodb.lang.Nullable;
+
 
 class CryptBinding implements ClusterAwareReadWriteBinding {
     private final ClusterAwareReadWriteBinding wrapped;
@@ -63,22 +60,6 @@ public ConnectionSource getConnectionSource(final ServerAddress serverAddress) {
         return new CryptConnectionSource(wrapped.getConnectionSource(serverAddress));
     }
 
-    @Override
-    public SessionContext getSessionContext() {
-        return wrapped.getSessionContext();
-    }
-
-    @Override
-    @Nullable
-    public ServerApi getServerApi() {
-        return wrapped.getServerApi();
-    }
-
-    @Override
-    public RequestContext getRequestContext() {
-        return wrapped.getRequestContext();
-    }
-
     @Override
     public OperationContext getOperationContext() {
         return wrapped.getOperationContext();
@@ -112,26 +93,11 @@ public ServerDescription getServerDescription() {
             return wrapped.getServerDescription();
         }
 
-        @Override
-        public SessionContext getSessionContext() {
-            return wrapped.getSessionContext();
-        }
-
         @Override
         public OperationContext getOperationContext() {
             return wrapped.getOperationContext();
         }
 
-        @Override
-        public ServerApi getServerApi() {
-            return wrapped.getServerApi();
-        }
-
-        @Override
-        public RequestContext getRequestContext() {
-            return wrapped.getRequestContext();
-        }
-
         @Override
         public ReadPreference getReadPreference() {
             return wrapped.getReadPreference();
diff --git a/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java
index 18742d487f9..f47f6a810a6 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java
@@ -19,11 +19,12 @@
 import com.mongodb.MongoClientException;
 import com.mongodb.ReadPreference;
 import com.mongodb.connection.ConnectionDescription;
-import com.mongodb.internal.binding.BindingContext;
 import com.mongodb.internal.connection.Connection;
 import com.mongodb.internal.connection.MessageSettings;
+import com.mongodb.internal.connection.OperationContext;
 import com.mongodb.internal.connection.SplittablePayload;
 import com.mongodb.internal.connection.SplittablePayloadBsonWriter;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.internal.validator.MappedFieldNameValidator;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonBinaryReader;
@@ -86,7 +87,7 @@ public ConnectionDescription getDescription() {
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
             @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-            final BindingContext context, final boolean responseExpected,
+            final OperationContext operationContext, final boolean responseExpected,
             @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) {
 
         if (serverIsLessThanVersionFourDotTwo(wrapped.getDescription())) {
@@ -104,17 +105,18 @@ public <T> T command(final String database, final BsonDocument command, final Fi
 
         getEncoder(command).encode(writer, command, EncoderContext.builder().build());
 
+        Timeout operationTimeout = operationContext.getTimeoutContext().getTimeout();
         RawBsonDocument encryptedCommand = crypt.encrypt(database,
-                new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()));
+                new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout);
 
         RawBsonDocument encryptedResponse = wrapped.command(database, encryptedCommand, commandFieldNameValidator, readPreference,
-                new RawBsonDocumentCodec(), context, responseExpected, null, null);
+                new RawBsonDocumentCodec(), operationContext, responseExpected, null, null);
 
         if (encryptedResponse == null) {
             return null;
         }
 
-        RawBsonDocument decryptedResponse = crypt.decrypt(encryptedResponse);
+        RawBsonDocument decryptedResponse = crypt.decrypt(encryptedResponse, operationTimeout);
 
         BsonBinaryReader reader = new BsonBinaryReader(decryptedResponse.getByteBuffer().asNIO());
 
@@ -124,8 +126,8 @@ public <T> T command(final String database, final BsonDocument command, final Fi
     @Nullable
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
-            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final BindingContext context) {
-        return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, context, true, null, null);
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext) {
+        return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, null, null);
     }
 
     @SuppressWarnings("unchecked")
diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypts.java b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java
index 73e4d42e8ef..55274fcc786 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/Crypts.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java
@@ -35,11 +35,11 @@
  */
 public final class Crypts {
 
-    public static Crypt createCrypt(final MongoClientImpl client, final AutoEncryptionSettings settings) {
+    public static Crypt createCrypt(final MongoClientSettings mongoClientSettings, final AutoEncryptionSettings settings) {
         MongoClient sharedInternalClient = null;
         MongoClientSettings keyVaultMongoClientSettings = settings.getKeyVaultMongoClientSettings();
         if (keyVaultMongoClientSettings == null || !settings.isBypassAutoEncryption()) {
-            MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(client.getSettings())
+            MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(mongoClientSettings)
                     .applyToConnectionPoolSettings(builder -> builder.minSize(0))
                     .autoEncryptionSettings(null)
                     .build();
diff --git a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java
index 3c4e1d18ea3..b37931c52cb 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java
@@ -21,7 +21,9 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.DistinctIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ReadOperation;
 import com.mongodb.internal.operation.SyncOperations;
@@ -46,19 +48,12 @@ class DistinctIterableImpl<TDocument, TResult> extends MongoIterableImpl<TResult
     private Collation collation;
     private BsonValue comment;
 
-    DistinctIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TDocument> documentClass,
-                         final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                         final ReadConcern readConcern, final OperationExecutor executor, final String fieldName, final Bson filter) {
-        this(clientSession, namespace, documentClass, resultClass, codecRegistry, readPreference, readConcern, executor, fieldName,
-                filter, true);
-    }
-
     DistinctIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TDocument> documentClass,
                          final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
                          final ReadConcern readConcern, final OperationExecutor executor, final String fieldName, final Bson filter,
-                         final boolean retryReads) {
-        super(clientSession, executor, readConcern, readPreference, retryReads);
-        this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads);
+                         final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings);
+        this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutSettings);
         this.resultClass = notNull("resultClass", resultClass);
         this.fieldName = notNull("mapFunction", fieldName);
         this.filter = filter;
@@ -83,6 +78,12 @@ public DistinctIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public DistinctIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public DistinctIterable<TResult> collation(@Nullable final Collation collation) {
         this.collation = collation;
@@ -103,6 +104,11 @@ public DistinctIterable<TResult> comment(@Nullable final BsonValue comment) {
 
     @Override
     public ReadOperation<BatchCursor<TResult>> asReadOperation() {
-        return operations.distinct(fieldName, filter, resultClass, maxTimeMS, collation, comment);
+        return operations.distinct(fieldName, filter, resultClass, collation, comment);
+    }
+
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(maxTimeMS));
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java
index de0fdc94f3e..fbead0d7911 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java
@@ -23,7 +23,9 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.FindIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.client.model.FindOptions;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ExplainableReadOperation;
@@ -49,16 +51,11 @@ class FindIterableImpl<TDocument, TResult> extends MongoIterableImpl<TResult> im
     private Bson filter;
 
     FindIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TDocument> documentClass,
-                     final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                     final ReadConcern readConcern, final OperationExecutor executor, final Bson filter) {
-        this(clientSession, namespace, documentClass, resultClass, codecRegistry, readPreference, readConcern, executor, filter, true);
-    }
-
-    FindIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TDocument> documentClass,
-                     final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                     final ReadConcern readConcern, final OperationExecutor executor, final Bson filter, final boolean retryReads) {
-        super(clientSession, executor, readConcern, readPreference, retryReads);
-        this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads);
+            final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
+            final ReadConcern readConcern, final OperationExecutor executor, final Bson filter, final boolean retryReads,
+            final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings);
+        this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutSettings);
         this.resultClass = notNull("resultClass", resultClass);
         this.filter = notNull("filter", filter);
         this.findOptions = new FindOptions();
@@ -92,7 +89,7 @@ public FindIterable<TResult> maxTime(final long maxTime, final TimeUnit timeUnit
 
     @Override
     public FindIterable<TResult> maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
-        notNull("timeUnit", timeUnit);
+        validateMaxAwaitTime(maxAwaitTime, timeUnit);
         findOptions.maxAwaitTime(maxAwaitTime, timeUnit);
         return this;
     }
@@ -104,6 +101,13 @@ public FindIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public FindIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        findOptions.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public FindIterable<TResult> collation(@Nullable final Collation collation) {
         findOptions.collation(collation);
@@ -203,8 +207,8 @@ public FindIterable<TResult> allowDiskUse(@Nullable final Boolean allowDiskUse)
     @Nullable
     @Override
     public TResult first() {
-        try (BatchCursor<TResult> batchCursor = getExecutor().execute(operations.findFirst(filter, resultClass, findOptions),
-                getReadPreference(), getReadConcern(), getClientSession())) {
+        try (BatchCursor<TResult> batchCursor = getExecutor().execute(
+                operations.findFirst(filter, resultClass, findOptions), getReadPreference(), getReadConcern(), getClientSession())) {
             return batchCursor.hasNext() ? batchCursor.next().iterator().next() : null;
         }
     }
@@ -229,10 +233,15 @@ public <E> E explain(final Class<E> explainResultClass, final ExplainVerbosity v
         return executeExplain(explainResultClass, notNull("verbosity", verbosity));
     }
 
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(findOptions));
+    }
+
     private <E> E executeExplain(final Class<E> explainResultClass, @Nullable final ExplainVerbosity verbosity) {
         notNull("explainDocumentClass", explainResultClass);
-        return getExecutor().execute(asReadOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)),
-                getReadPreference(), getReadConcern(), getClientSession());
+        return getExecutor().execute(
+                asReadOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession());
     }
 
     public ExplainableReadOperation<BatchCursor<TResult>> asReadOperation() {
diff --git a/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java b/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java
index 7ae6f106ed5..fee5ddac729 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java
@@ -17,9 +17,13 @@
 package com.mongodb.client.internal;
 
 import com.mongodb.ServerAddress;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.connection.SslHelper;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
-import com.mongodb.internal.connection.SslHelper;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
+import org.jetbrains.annotations.NotNull;
 
 import javax.net.SocketFactory;
 import javax.net.ssl.SSLContext;
@@ -32,10 +36,14 @@
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.net.SocketException;
 import java.nio.ByteBuffer;
 import java.util.Map;
 
+import static com.mongodb.assertions.Assertions.assertNotNull;
 import static com.mongodb.assertions.Assertions.notNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 class KeyManagementService {
     private static final Logger LOGGER = Loggers.getLogger("client");
@@ -47,7 +55,7 @@ class KeyManagementService {
         this.timeoutMillis = timeoutMillis;
     }
 
-    public InputStream stream(final String kmsProvider, final String host, final ByteBuffer message) throws IOException {
+    public InputStream stream(final String kmsProvider, final String host, final ByteBuffer message, @Nullable final Timeout operationTimeout) throws IOException {
         ServerAddress serverAddress = new ServerAddress(host);
 
         LOGGER.info("Connecting to KMS server at " + serverAddress);
@@ -79,7 +87,7 @@ public InputStream stream(final String kmsProvider, final String host, final Byt
         }
 
         try {
-            return socket.getInputStream();
+            return OperationTimeoutAwareInputStream.wrapIfNeeded(operationTimeout, socket);
         } catch (IOException e) {
             closeSocket(socket);
             throw e;
@@ -102,4 +110,85 @@ private void closeSocket(final Socket socket) {
             // ignore
         }
     }
+
+    private static final class OperationTimeoutAwareInputStream extends InputStream {
+        private final Socket socket;
+        private final Timeout operationTimeout;
+        private final InputStream wrapped;
+
+        /**
+         * @param socket - socket to set timeout on.
+         * @param operationTimeout - non-infinite timeout.
+         */
+        private OperationTimeoutAwareInputStream(final Socket socket, final Timeout operationTimeout) throws IOException {
+            this.socket = socket;
+            this.operationTimeout = operationTimeout;
+            this.wrapped = socket.getInputStream();
+        }
+
+        public static InputStream wrapIfNeeded(@Nullable final Timeout operationTimeout, final SSLSocket socket) throws IOException {
+            return Timeout.nullAsInfinite(operationTimeout).checkedCall(NANOSECONDS,
+                    () -> socket.getInputStream(),
+                    (ns) -> new OperationTimeoutAwareInputStream(socket, assertNotNull(operationTimeout)),
+                    () -> new OperationTimeoutAwareInputStream(socket, assertNotNull(operationTimeout)));
+        }
+
+        private void setSocketSoTimeoutToOperationTimeout() throws SocketException {
+            operationTimeout.checkedRun(MILLISECONDS,
+                    () -> {
+                        throw new AssertionError("operationTimeout cannot be infinite");
+                    },
+                    (ms) -> socket.setSoTimeout(Math.toIntExact(ms)),
+                    () -> TimeoutContext.throwMongoTimeoutException("Reading from KMS server exceeded the timeout limit."));
+        }
+
+        @Override
+        public int read() throws IOException {
+            setSocketSoTimeoutToOperationTimeout();
+            return wrapped.read();
+        }
+
+        @Override
+        public int read(@NotNull final byte[] b) throws IOException {
+            setSocketSoTimeoutToOperationTimeout();
+            return wrapped.read(b);
+        }
+
+        @Override
+        public int read(@NotNull final byte[] b, final int off, final int len) throws IOException {
+            setSocketSoTimeoutToOperationTimeout();
+            return wrapped.read(b, off, len);
+        }
+
+        @Override
+        public void close() throws IOException {
+            wrapped.close();
+        }
+
+        @Override
+        public long skip(final long n) throws IOException {
+            setSocketSoTimeoutToOperationTimeout();
+            return wrapped.skip(n);
+        }
+
+        @Override
+        public int available() throws IOException {
+            return wrapped.available();
+        }
+
+        @Override
+        public synchronized void mark(final int readlimit) {
+            wrapped.mark(readlimit);
+        }
+
+        @Override
+        public synchronized void reset() throws IOException {
+            wrapped.reset();
+        }
+
+        @Override
+        public boolean markSupported() {
+            return wrapped.markSupported();
+        }
+    }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java b/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java
index 14906349404..59544eefc45 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java
@@ -19,14 +19,19 @@
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadConcern;
 import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
 import org.bson.BsonDocument;
 
 import java.util.ArrayList;
 import java.util.List;
 
 import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout;
 
 class KeyRetriever {
+    private static final String TIMEOUT_ERROR_MESSAGE = "Key retrieval exceeded the timeout limit.";
     private final MongoClient client;
     private final MongoNamespace namespace;
 
@@ -35,8 +40,11 @@ class KeyRetriever {
         this.namespace = notNull("namespace", namespace);
     }
 
-    public List<BsonDocument> find(final BsonDocument keyFilter) {
-        return client.getDatabase(namespace.getDatabaseName()).getCollection(namespace.getCollectionName(), BsonDocument.class)
+    public List<BsonDocument> find(final BsonDocument keyFilter, @Nullable final Timeout operationTimeout) {
+        MongoCollection<BsonDocument> collection = client.getDatabase(namespace.getDatabaseName())
+                .getCollection(namespace.getCollectionName(), BsonDocument.class);
+
+        return collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, operationTimeout)
                 .withReadConcern(ReadConcern.MAJORITY)
                 .find(keyFilter).into(new ArrayList<>());
     }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java
index e6da2f332c1..7d617947077 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java
@@ -21,6 +21,8 @@
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.ListCollectionNamesIterable;
 import com.mongodb.client.ListCollectionsIterable;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ReadOperation;
 import com.mongodb.internal.operation.SyncOperations;
@@ -40,7 +42,6 @@ class ListCollectionsIterableImpl<TResult> extends MongoIterableImpl<TResult> im
     private final SyncOperations<BsonDocument> operations;
     private final String databaseName;
     private final Class<TResult> resultClass;
-
     private Bson filter;
     private final boolean collectionNamesOnly;
     private boolean authorizedCollections;
@@ -49,10 +50,10 @@ class ListCollectionsIterableImpl<TResult> extends MongoIterableImpl<TResult> im
 
     ListCollectionsIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final boolean collectionNamesOnly,
                                 final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                                final OperationExecutor executor, final boolean retryReads) {
-        super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); // TODO: read concern?
+                                final OperationExecutor executor, final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); // TODO: read concern?
         this.collectionNamesOnly = collectionNamesOnly;
-        this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads);
+        this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings);
         this.databaseName = notNull("databaseName", databaseName);
         this.resultClass = notNull("resultClass", resultClass);
     }
@@ -76,6 +77,12 @@ public ListCollectionsIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public ListCollectionsIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public ListCollectionsIterable<TResult> comment(@Nullable final String comment) {
         this.comment = comment != null ? new BsonString(comment) : null;
@@ -99,6 +106,11 @@ ListCollectionsIterableImpl<TResult> authorizedCollections(final boolean authori
     @Override
     public ReadOperation<BatchCursor<TResult>> asReadOperation() {
         return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections,
-                getBatchSize(), maxTimeMS, comment);
+                getBatchSize(), comment, getTimeoutMode());
+    }
+
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(maxTimeMS));
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java
index 50c4eb14b4a..83bc08b3dd1 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java
@@ -19,6 +19,8 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.ListDatabasesIterable;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ReadOperation;
 import com.mongodb.internal.operation.SyncOperations;
@@ -48,17 +50,11 @@ public class ListDatabasesIterableImpl<TResult> extends MongoIterableImpl<TResul
     private Boolean authorizedDatabasesOnly;
     private BsonValue comment;
 
-    ListDatabasesIterableImpl(@Nullable final ClientSession clientSession, final Class<TResult> resultClass,
-                              final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                              final OperationExecutor executor) {
-        this(clientSession, resultClass, codecRegistry, readPreference, executor, true);
-    }
-
     public ListDatabasesIterableImpl(@Nullable final ClientSession clientSession, final Class<TResult> resultClass,
-                                     final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                                     final OperationExecutor executor, final boolean retryReads) {
-        super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); // TODO: read concern?
-        this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads);
+            final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor,
+            final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); // TODO: read concern?
+        this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings);
         this.resultClass = notNull("clazz", resultClass);
     }
 
@@ -75,6 +71,12 @@ public ListDatabasesIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public ListDatabasesIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public ListDatabasesIterable<TResult> filter(@Nullable final Bson filter) {
         this.filter = filter;
@@ -107,6 +109,11 @@ public ListDatabasesIterable<TResult> comment(@Nullable final BsonValue comment)
 
     @Override
     public ReadOperation<BatchCursor<TResult>> asReadOperation() {
-        return operations.listDatabases(resultClass, filter, nameOnly, maxTimeMS, authorizedDatabasesOnly, comment);
+        return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabasesOnly, comment);
+    }
+
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(maxTimeMS));
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java
index c2a9d528007..19be1bdc8ed 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java
@@ -21,6 +21,8 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.ListIndexesIterable;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ReadOperation;
 import com.mongodb.internal.operation.SyncOperations;
@@ -42,15 +44,10 @@ class ListIndexesIterableImpl<TResult> extends MongoIterableImpl<TResult> implem
     private BsonValue comment;
 
     ListIndexesIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TResult> resultClass,
-                            final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor) {
-        this(clientSession, namespace, resultClass, codecRegistry, readPreference, executor, true);
-    }
-
-    ListIndexesIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TResult> resultClass,
-                            final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor,
-                            final boolean retryReads) {
-        super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads);
-        this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads);
+            final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor,
+            final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings);
+        this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings);
         this.resultClass = notNull("resultClass", resultClass);
     }
 
@@ -67,6 +64,12 @@ public ListIndexesIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public ListIndexesIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public ListIndexesIterable<TResult> comment(@Nullable final String comment) {
         this.comment = comment != null ? new BsonString(comment) : null;
@@ -81,6 +84,10 @@ public ListIndexesIterable<TResult> comment(@Nullable final BsonValue comment) {
 
     @Override
     public ReadOperation<BatchCursor<TResult>> asReadOperation() {
-        return operations.listIndexes(resultClass, getBatchSize(), maxTimeMS, comment);
+        return operations.listIndexes(resultClass, getBatchSize(), comment, getTimeoutMode());
+    }
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(maxTimeMS));
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java
index 0ffc9cea7a5..c67106d357d 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java
@@ -21,7 +21,9 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.client.ListSearchIndexesIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ExplainableReadOperation;
 import com.mongodb.internal.operation.ReadOperation;
@@ -54,11 +56,10 @@ final class ListSearchIndexesIterableImpl<TResult> extends MongoIterableImpl<TRe
 
     ListSearchIndexesIterableImpl(final MongoNamespace namespace, final OperationExecutor executor,
                                   final Class<TResult> resultClass, final CodecRegistry codecRegistry,
-                                  final ReadPreference readPreference, final boolean retryReads) {
-        super(null, executor, ReadConcern.DEFAULT, readPreference, retryReads);
-
+                                  final ReadPreference readPreference, final boolean retryReads, final TimeoutSettings timeoutSettings) {
+        super(null, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings);
         this.resultClass = resultClass;
-        this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads);
+        this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings);
         this.codecRegistry = codecRegistry;
     }
 
@@ -67,7 +68,6 @@ public ReadOperation<BatchCursor<TResult>> asReadOperation() {
         return asAggregateOperation();
     }
 
-
     @Override
     public ListSearchIndexesIterable<TResult> allowDiskUse(@Nullable final Boolean allowDiskUse) {
         this.allowDiskUse = allowDiskUse;
@@ -80,6 +80,12 @@ public ListSearchIndexesIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Override
+    public ListSearchIndexesIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public ListSearchIndexesIterable<TResult> maxTime(final long maxTime, final TimeUnit timeUnit) {
         notNull("timeUnit", timeUnit);
@@ -136,12 +142,18 @@ public <E> E explain(final Class<E> explainResultClass, final ExplainVerbosity v
     }
 
     private <E> E executeExplain(final Class<E> explainResultClass, @Nullable final ExplainVerbosity verbosity) {
-        return getExecutor().execute(asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)),
-                getReadPreference(), getReadConcern(), getClientSession());
+        return getExecutor().execute(asAggregateOperation()
+                        .asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession());
     }
 
     private ExplainableReadOperation<BatchCursor<TResult>> asAggregateOperation() {
-        return operations.listSearchIndexes(resultClass, maxTimeMS, indexName, getBatchSize(), collation, comment,
+        return operations.listSearchIndexes(resultClass, indexName, getBatchSize(), collation, comment,
                 allowDiskUse);
     }
+
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(maxTimeMS));
+    }
+
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java
index 9c531f45d58..8a0107aafeb 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java
@@ -21,7 +21,9 @@
 import com.mongodb.ReadPreference;
 import com.mongodb.WriteConcern;
 import com.mongodb.client.ClientSession;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.Collation;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.binding.ReadBinding;
 import com.mongodb.internal.client.model.FindOptions;
 import com.mongodb.internal.operation.BatchCursor;
@@ -41,8 +43,7 @@
 import static com.mongodb.assertions.Assertions.notNull;
 
 @SuppressWarnings("deprecation")
-class MapReduceIterableImpl<TDocument, TResult> extends MongoIterableImpl<TResult>
-        implements com.mongodb.client.MapReduceIterable<TResult> {
+class MapReduceIterableImpl<TDocument, TResult> extends MongoIterableImpl<TResult> implements com.mongodb.client.MapReduceIterable<TResult> {
     private final SyncOperations<TDocument> operations;
     private final MongoNamespace namespace;
     private final Class<TResult> resultClass;
@@ -67,10 +68,10 @@ class MapReduceIterableImpl<TDocument, TResult> extends MongoIterableImpl<TResul
     MapReduceIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class<TDocument> documentClass,
                           final Class<TResult> resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference,
                           final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor,
-                          final String mapFunction, final String reduceFunction) {
-        super(clientSession, executor, readConcern, readPreference, false);
+                          final String mapFunction, final String reduceFunction, final TimeoutSettings timeoutSettings) {
+        super(clientSession, executor, readConcern, readPreference, false, timeoutSettings);
         this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern,
-                false, false);
+                false, false, timeoutSettings);
         this.namespace = notNull("namespace", namespace);
         this.resultClass = notNull("resultClass", resultClass);
         this.mapFunction = notNull("mapFunction", mapFunction);
@@ -160,6 +161,12 @@ public com.mongodb.client.MapReduceIterable<TResult> batchSize(final int batchSi
         return this;
     }
 
+    @Override
+    public com.mongodb.client.MapReduceIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        super.timeoutMode(timeoutMode);
+        return this;
+    }
+
     @Override
     public com.mongodb.client.MapReduceIterable<TResult> bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) {
         this.bypassDocumentValidation = bypassDocumentValidation;
@@ -181,11 +188,16 @@ ReadPreference getReadPreference() {
         }
     }
 
+
+    protected OperationExecutor getExecutor() {
+        return getExecutor(operations.createTimeoutSettings(maxTimeMS));
+    }
+
     @Override
     public ReadOperation<BatchCursor<TResult>> asReadOperation() {
         if (inline) {
             ReadOperation<MapReduceBatchCursor<TResult>> operation = operations.mapReduce(mapFunction, reduceFunction, finalizeFunction,
-                    resultClass, filter, limit, maxTimeMS, jsMode, scope, sort, verbose, collation);
+                    resultClass, filter, limit, jsMode, scope, sort, verbose, collation);
             return new WrappedMapReduceReadOperation<>(operation);
         } else {
             getExecutor().execute(createMapReduceToCollectionOperation(), getReadConcern(), getClientSession());
@@ -204,7 +216,7 @@ public ReadOperation<BatchCursor<TResult>> asReadOperation() {
 
     private WriteOperation<MapReduceStatistics> createMapReduceToCollectionOperation() {
         return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter,
-                limit, maxTimeMS, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation
+                limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation
         );
     }
 
diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java
deleted file mode 100644
index 8703fc8ce2d..00000000000
--- a/driver-sync/src/main/com/mongodb/client/internal/MongoClientDelegate.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright 2008-present MongoDB, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb.client.internal;
-
-import com.mongodb.ClientSessionOptions;
-import com.mongodb.MongoClientException;
-import com.mongodb.MongoException;
-import com.mongodb.MongoInternalException;
-import com.mongodb.MongoQueryException;
-import com.mongodb.MongoSocketException;
-import com.mongodb.MongoTimeoutException;
-import com.mongodb.ReadConcern;
-import com.mongodb.ReadPreference;
-import com.mongodb.RequestContext;
-import com.mongodb.ServerApi;
-import com.mongodb.TransactionOptions;
-import com.mongodb.WriteConcern;
-import com.mongodb.client.ClientSession;
-import com.mongodb.client.SynchronousContextProvider;
-import com.mongodb.internal.IgnorableRequestContext;
-import com.mongodb.internal.binding.ClusterAwareReadWriteBinding;
-import com.mongodb.internal.binding.ClusterBinding;
-import com.mongodb.internal.binding.ReadBinding;
-import com.mongodb.internal.binding.ReadWriteBinding;
-import com.mongodb.internal.binding.WriteBinding;
-import com.mongodb.internal.connection.Cluster;
-import com.mongodb.internal.operation.ReadOperation;
-import com.mongodb.internal.operation.WriteOperation;
-import com.mongodb.internal.session.ServerSessionPool;
-import com.mongodb.lang.Nullable;
-import org.bson.codecs.configuration.CodecRegistry;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL;
-import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL;
-import static com.mongodb.ReadPreference.primary;
-import static com.mongodb.assertions.Assertions.isTrue;
-import static com.mongodb.assertions.Assertions.notNull;
-
-final class MongoClientDelegate {
-    private final Cluster cluster;
-    private final ServerSessionPool serverSessionPool;
-    private final Object originator;
-    private final OperationExecutor operationExecutor;
-    private final Crypt crypt;
-    @Nullable
-    private final ServerApi serverApi;
-    private final CodecRegistry codecRegistry;
-    @Nullable
-    private final SynchronousContextProvider contextProvider;
-    private final AtomicBoolean closed;
-
-    MongoClientDelegate(final Cluster cluster, final CodecRegistry codecRegistry,
-                        final Object originator, @Nullable final OperationExecutor operationExecutor,
-                        @Nullable final Crypt crypt, @Nullable final ServerApi serverApi,
-                        @Nullable final SynchronousContextProvider contextProvider) {
-        this.cluster = cluster;
-        this.codecRegistry = codecRegistry;
-        this.contextProvider = contextProvider;
-        this.serverSessionPool = new ServerSessionPool(cluster, serverApi);
-        this.originator = originator;
-        this.operationExecutor = operationExecutor == null ? new DelegateOperationExecutor() : operationExecutor;
-        this.crypt = crypt;
-        this.serverApi = serverApi;
-        this.closed = new AtomicBoolean();
-    }
-
-    public OperationExecutor getOperationExecutor() {
-        return operationExecutor;
-    }
-
-    public ClientSession createClientSession(final ClientSessionOptions options, final ReadConcern readConcern,
-                                             final WriteConcern writeConcern, final ReadPreference readPreference) {
-        notNull("readConcern", readConcern);
-        notNull("writeConcern", writeConcern);
-        notNull("readPreference", readPreference);
-
-        ClientSessionOptions mergedOptions = ClientSessionOptions.builder(options)
-                .defaultTransactionOptions(
-                        TransactionOptions.merge(
-                                options.getDefaultTransactionOptions(),
-                                TransactionOptions.builder()
-                                        .readConcern(readConcern)
-                                        .writeConcern(writeConcern)
-                                        .readPreference(readPreference)
-                                        .build()))
-                .build();
-        return new ClientSessionImpl(serverSessionPool, originator, mergedOptions, this);
-    }
-
-    public void close() {
-        if (!closed.getAndSet(true)) {
-            if (crypt != null) {
-                crypt.close();
-            }
-            serverSessionPool.close();
-            cluster.close();
-        }
-    }
-
-    public Cluster getCluster() {
-        return cluster;
-    }
-
-    public CodecRegistry getCodecRegistry() {
-        return codecRegistry;
-    }
-
-    public ServerSessionPool getServerSessionPool() {
-        return serverSessionPool;
-    }
-
-    private class DelegateOperationExecutor implements OperationExecutor {
-        @Override
-        public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern) {
-            return execute(operation, readPreference, readConcern, null);
-        }
-
-        @Override
-        public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern) {
-            return execute(operation, readConcern, null);
-        }
-
-        @Override
-        public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern,
-                             @Nullable final ClientSession session) {
-            if (session != null) {
-                session.notifyOperationInitiated(operation);
-            }
-
-            ClientSession actualClientSession = getClientSession(session);
-            ReadBinding binding = getReadBinding(readPreference, readConcern, actualClientSession, session == null);
-
-            try {
-                if (actualClientSession.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) {
-                    throw new MongoClientException("Read preference in a transaction must be primary");
-                }
-                return operation.execute(binding);
-            } catch (MongoException e) {
-                labelException(actualClientSession, e);
-                clearTransactionContextOnTransientTransactionError(session, e);
-                throw e;
-            } finally {
-                binding.release();
-            }
-        }
-
-        @Override
-        public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern, @Nullable final ClientSession session) {
-            if (session != null) {
-                session.notifyOperationInitiated(operation);
-            }
-
-            ClientSession actualClientSession = getClientSession(session);
-            WriteBinding binding = getWriteBinding(readConcern, actualClientSession, session == null);
-
-            try {
-                return operation.execute(binding);
-            } catch (MongoException e) {
-                labelException(actualClientSession, e);
-                clearTransactionContextOnTransientTransactionError(session, e);
-                throw e;
-            } finally {
-                binding.release();
-            }
-        }
-
-        WriteBinding getWriteBinding(final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) {
-            return getReadWriteBinding(primary(), readConcern, session, ownsSession);
-        }
-
-        ReadBinding getReadBinding(final ReadPreference readPreference, final ReadConcern readConcern,
-                                   final ClientSession session, final boolean ownsSession) {
-            return getReadWriteBinding(readPreference, readConcern, session, ownsSession);
-        }
-
-        ReadWriteBinding getReadWriteBinding(final ReadPreference readPreference, final ReadConcern readConcern,
-                                             final ClientSession session, final boolean ownsSession) {
-            ClusterAwareReadWriteBinding readWriteBinding = new ClusterBinding(cluster,
-                    getReadPreferenceForBinding(readPreference, session), readConcern, serverApi, getContext());
-
-            if (crypt != null) {
-                readWriteBinding = new CryptBinding(readWriteBinding, crypt);
-            }
-
-            return new ClientSessionBinding(session, ownsSession, readWriteBinding);
-        }
-
-        private RequestContext getContext() {
-            RequestContext context = null;
-            if (contextProvider != null) {
-                context = contextProvider.getContext();
-            }
-            return context == null ? IgnorableRequestContext.INSTANCE : context;
-        }
-
-        private void labelException(final ClientSession session, final MongoException e) {
-            if (session.hasActiveTransaction() && (e instanceof MongoSocketException || e instanceof MongoTimeoutException
-                    || e instanceof MongoQueryException && e.getCode() == 91)
-                    && !e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) {
-                e.addLabel(TRANSIENT_TRANSACTION_ERROR_LABEL);
-            }
-        }
-
-        private void clearTransactionContextOnTransientTransactionError(@Nullable final ClientSession session, final MongoException e) {
-            if (session != null && e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)) {
-                session.clearTransactionContext();
-            }
-        }
-
-        private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) {
-            if (session == null) {
-                return readPreference;
-            }
-            if (session.hasActiveTransaction()) {
-                ReadPreference readPreferenceForBinding = session.getTransactionOptions().getReadPreference();
-                if (readPreferenceForBinding == null) {
-                    throw new MongoInternalException("Invariant violated.  Transaction options read preference can not be null");
-                }
-                return readPreferenceForBinding;
-            }
-            return readPreference;
-        }
-
-        ClientSession getClientSession(@Nullable final ClientSession clientSessionFromOperation) {
-            ClientSession session;
-            if (clientSessionFromOperation != null) {
-                isTrue("ClientSession from same MongoClient", clientSessionFromOperation.getOriginator() == originator);
-                session = clientSessionFromOperation;
-            } else {
-                session = createClientSession(ClientSessionOptions.builder().causallyConsistent(false).build(), ReadConcern.DEFAULT,
-                        WriteConcern.ACKNOWLEDGED, primary());
-            }
-            return session;
-        }
-    }
-}
diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java
index 0a560442639..473d8ec4e8e 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java
@@ -20,19 +20,21 @@
 import com.mongodb.ClientSessionOptions;
 import com.mongodb.MongoClientSettings;
 import com.mongodb.MongoDriverInformation;
+import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
-import com.mongodb.TransactionOptions;
+import com.mongodb.WriteConcern;
 import com.mongodb.client.ChangeStreamIterable;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.ListDatabasesIterable;
 import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCluster;
 import com.mongodb.client.MongoDatabase;
 import com.mongodb.client.MongoIterable;
 import com.mongodb.client.SynchronousContextProvider;
 import com.mongodb.connection.ClusterDescription;
 import com.mongodb.connection.SocketSettings;
 import com.mongodb.connection.TransportSettings;
-import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.connection.Cluster;
 import com.mongodb.internal.connection.DefaultClusterFactory;
 import com.mongodb.internal.connection.InternalConnectionPoolSettings;
@@ -48,8 +50,9 @@
 import org.bson.codecs.configuration.CodecRegistry;
 import org.bson.conversions.Bson;
 
-import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.client.internal.Crypts.createCrypt;
@@ -68,7 +71,8 @@ public final class MongoClientImpl implements MongoClient {
 
     private final MongoClientSettings settings;
     private final MongoDriverInformation mongoDriverInformation;
-    private final MongoClientDelegate delegate;
+    private final MongoClusterImpl delegate;
+    private final AtomicBoolean closed;
 
     public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation) {
         this(createCluster(settings, mongoDriverInformation), mongoDriverInformation, settings, null);
@@ -84,136 +88,172 @@ public MongoClientImpl(final Cluster cluster, final MongoDriverInformation mongo
             throw new IllegalArgumentException("The contextProvider must be an instance of "
                     + SynchronousContextProvider.class.getName() + " when using the synchronous driver");
         }
-        this.delegate = new MongoClientDelegate(notNull("cluster", cluster),
-                withUuidRepresentation(settings.getCodecRegistry(), settings.getUuidRepresentation()), this, operationExecutor,
-                autoEncryptionSettings == null ? null : createCrypt(this, autoEncryptionSettings), settings.getServerApi(),
-                (SynchronousContextProvider) settings.getContextProvider());
+
+        this.delegate = new MongoClusterImpl(autoEncryptionSettings, cluster,
+                                             withUuidRepresentation(settings.getCodecRegistry(), settings.getUuidRepresentation()),
+                                             (SynchronousContextProvider) settings.getContextProvider(),
+                                             autoEncryptionSettings == null ? null : createCrypt(settings, autoEncryptionSettings), this,
+                                             operationExecutor, settings.getReadConcern(), settings.getReadPreference(), settings.getRetryReads(),
+                                             settings.getRetryWrites(), settings.getServerApi(),
+                                             new ServerSessionPool(cluster, TimeoutSettings.create(settings), settings.getServerApi()),
+                                             TimeoutSettings.create(settings), settings.getUuidRepresentation(), settings.getWriteConcern());
+        this.closed = new AtomicBoolean();
         BsonDocument clientMetadataDocument = createClientMetadataDocument(settings.getApplicationName(), mongoDriverInformation);
+
         LOGGER.info(format("MongoClient with metadata %s created with settings %s", clientMetadataDocument.toJson(), settings));
     }
 
     @Override
-    public MongoDatabase getDatabase(final String databaseName) {
-        return new MongoDatabaseImpl(databaseName, delegate.getCodecRegistry(), settings.getReadPreference(), settings.getWriteConcern(),
-                settings.getRetryWrites(), settings.getRetryReads(), settings.getReadConcern(),
-                settings.getUuidRepresentation(), settings.getAutoEncryptionSettings(), delegate.getOperationExecutor());
+    public void close() {
+        if (!closed.getAndSet(true)) {
+            Crypt crypt = delegate.getCrypt();
+            if (crypt != null) {
+                crypt.close();
+            }
+            delegate.getServerSessionPool().close();
+            delegate.getCluster().close();
+        }
     }
 
     @Override
-    public MongoIterable<String> listDatabaseNames() {
-        return createListDatabaseNamesIterable(null);
+    public ClusterDescription getClusterDescription() {
+        return delegate.getCluster().getCurrentDescription();
     }
 
     @Override
-    public MongoIterable<String> listDatabaseNames(final ClientSession clientSession) {
-        notNull("clientSession", clientSession);
-        return createListDatabaseNamesIterable(clientSession);
+    public CodecRegistry getCodecRegistry() {
+        return delegate.getCodecRegistry();
     }
 
     @Override
-    public ListDatabasesIterable<Document> listDatabases() {
-        return listDatabases(Document.class);
+    public ReadPreference getReadPreference() {
+        return delegate.getReadPreference();
     }
 
     @Override
-    public <T> ListDatabasesIterable<T> listDatabases(final Class<T> clazz) {
-        return createListDatabasesIterable(null, clazz);
+    public WriteConcern getWriteConcern() {
+        return delegate.getWriteConcern();
     }
 
     @Override
-    public ListDatabasesIterable<Document> listDatabases(final ClientSession clientSession) {
-        return listDatabases(clientSession, Document.class);
+    public ReadConcern getReadConcern() {
+        return delegate.getReadConcern();
     }
 
     @Override
-    public <T> ListDatabasesIterable<T> listDatabases(final ClientSession clientSession, final Class<T> clazz) {
-        notNull("clientSession", clientSession);
-        return createListDatabasesIterable(clientSession, clazz);
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return delegate.getTimeout(timeUnit);
+    }
+
+    @Override
+    public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) {
+        return delegate.withCodecRegistry(codecRegistry);
+    }
+
+    @Override
+    public MongoCluster withReadPreference(final ReadPreference readPreference) {
+        return delegate.withReadPreference(readPreference);
+    }
+
+    @Override
+    public MongoCluster withWriteConcern(final WriteConcern writeConcern) {
+        return delegate.withWriteConcern(writeConcern);
+    }
+
+    @Override
+    public MongoCluster withReadConcern(final ReadConcern readConcern) {
+        return delegate.withReadConcern(readConcern);
+    }
+
+    @Override
+    public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return delegate.withTimeout(timeout, timeUnit);
+    }
+
+    @Override
+    public MongoDatabase getDatabase(final String databaseName) {
+        return delegate.getDatabase(databaseName);
     }
 
     @Override
     public ClientSession startSession() {
-        return startSession(ClientSessionOptions
-                .builder()
-                .defaultTransactionOptions(TransactionOptions.builder()
-                        .readConcern(settings.getReadConcern())
-                        .writeConcern(settings.getWriteConcern())
-                        .build())
-                .build());
+        return delegate.startSession();
     }
 
     @Override
     public ClientSession startSession(final ClientSessionOptions options) {
-        return delegate.createClientSession(notNull("options", options),
-                settings.getReadConcern(), settings.getWriteConcern(), settings.getReadPreference());
+        return delegate.startSession(options);
     }
 
     @Override
-    public void close() {
-        delegate.close();
+    public MongoIterable<String> listDatabaseNames() {
+        return delegate.listDatabaseNames();
     }
 
     @Override
-    public ChangeStreamIterable<Document> watch() {
-        return watch(Collections.emptyList());
+    public MongoIterable<String> listDatabaseNames(final ClientSession clientSession) {
+        return delegate.listDatabaseNames(clientSession);
     }
 
     @Override
-    public <TResult> ChangeStreamIterable<TResult> watch(final Class<TResult> resultClass) {
-        return watch(Collections.emptyList(), resultClass);
+    public ListDatabasesIterable<Document> listDatabases() {
+        return delegate.listDatabases();
     }
 
     @Override
-    public ChangeStreamIterable<Document> watch(final List<? extends Bson> pipeline) {
-        return watch(pipeline, Document.class);
+    public ListDatabasesIterable<Document> listDatabases(final ClientSession clientSession) {
+        return delegate.listDatabases(clientSession);
     }
 
     @Override
-    public <TResult> ChangeStreamIterable<TResult> watch(final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
-        return createChangeStreamIterable(null, pipeline, resultClass);
+    public <TResult> ListDatabasesIterable<TResult> listDatabases(final Class<TResult> resultClass) {
+        return delegate.listDatabases(resultClass);
     }
 
     @Override
-    public ChangeStreamIterable<Document> watch(final ClientSession clientSession) {
-        return watch(clientSession, Collections.emptyList(), Document.class);
+    public <TResult> ListDatabasesIterable<TResult> listDatabases(final ClientSession clientSession, final Class<TResult> resultClass) {
+        return delegate.listDatabases(clientSession, resultClass);
     }
 
     @Override
-    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final Class<TResult> resultClass) {
-        return watch(clientSession, Collections.emptyList(), resultClass);
+    public ChangeStreamIterable<Document> watch() {
+        return delegate.watch();
     }
 
     @Override
-    public ChangeStreamIterable<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
-        return watch(clientSession, pipeline, Document.class);
+    public <TResult> ChangeStreamIterable<TResult> watch(final Class<TResult> resultClass) {
+        return delegate.watch(resultClass);
     }
 
     @Override
-    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final List<? extends Bson> pipeline,
-                                                         final Class<TResult> resultClass) {
-        notNull("clientSession", clientSession);
-        return createChangeStreamIterable(clientSession, pipeline, resultClass);
+    public ChangeStreamIterable<Document> watch(final List<? extends Bson> pipeline) {
+        return delegate.watch(pipeline);
     }
 
     @Override
-    public ClusterDescription getClusterDescription() {
-        return delegate.getCluster().getCurrentDescription();
+    public <TResult> ChangeStreamIterable<TResult> watch(final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return delegate.watch(pipeline, resultClass);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession) {
+        return delegate.watch(clientSession);
     }
 
-    private <TResult> ChangeStreamIterable<TResult> createChangeStreamIterable(@Nullable final ClientSession clientSession,
-                                                                               final List<? extends Bson> pipeline,
-                                                                               final Class<TResult> resultClass) {
-        return new ChangeStreamIterableImpl<>(clientSession, "admin", delegate.getCodecRegistry(), settings.getReadPreference(),
-                settings.getReadConcern(), delegate.getOperationExecutor(),
-                pipeline, resultClass, ChangeStreamLevel.CLIENT, settings.getRetryReads());
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final Class<TResult> resultClass) {
+        return delegate.watch(clientSession, resultClass);
     }
 
-    public Cluster getCluster() {
-        return delegate.getCluster();
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
+        return delegate.watch(clientSession, pipeline);
     }
 
-    public CodecRegistry getCodecRegistry() {
-        return delegate.getCodecRegistry();
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(
+            final ClientSession clientSession, final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return delegate.watch(clientSession, pipeline, resultClass);
     }
 
     private static Cluster createCluster(final MongoClientSettings settings,
@@ -221,7 +261,8 @@ private static Cluster createCluster(final MongoClientSettings settings,
         notNull("settings", settings);
         return new DefaultClusterFactory().createCluster(settings.getClusterSettings(), settings.getServerSettings(),
                 settings.getConnectionPoolSettings(), InternalConnectionPoolSettings.builder().build(),
-                getStreamFactory(settings, false), getStreamFactory(settings, true),
+                TimeoutSettings.create(settings), getStreamFactory(settings, false),
+                TimeoutSettings.createHeartbeatSettings(settings), getStreamFactory(settings, true),
                 settings.getCredential(), settings.getLoggerSettings(), getCommandListener(settings.getCommandListeners()),
                 settings.getApplicationName(), mongoDriverInformation, settings.getCompressorList(), settings.getServerApi(),
                 settings.getDnsClient());
@@ -239,13 +280,8 @@ private static StreamFactory getStreamFactory(final MongoClientSettings settings
         }
     }
 
-    private <T> ListDatabasesIterable<T> createListDatabasesIterable(@Nullable final ClientSession clientSession, final Class<T> clazz) {
-        return new ListDatabasesIterableImpl<>(clientSession, clazz, delegate.getCodecRegistry(), ReadPreference.primary(),
-                delegate.getOperationExecutor(), settings.getRetryReads());
-    }
-
-    private MongoIterable<String> createListDatabaseNamesIterable(@Nullable final ClientSession clientSession) {
-        return createListDatabasesIterable(clientSession, BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue());
+    public Cluster getCluster() {
+        return delegate.getCluster();
     }
 
     public ServerSessionPool getServerSessionPool() {
@@ -256,6 +292,10 @@ public OperationExecutor getOperationExecutor() {
         return delegate.getOperationExecutor();
     }
 
+    public TimeoutSettings getTimeoutSettings() {
+        return delegate.getTimeoutSettings();
+    }
+
     public MongoClientSettings getSettings() {
         return settings;
     }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java
new file mode 100644
index 00000000000..b3d03095070
--- /dev/null
+++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.internal;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.MongoClientException;
+import com.mongodb.MongoException;
+import com.mongodb.MongoInternalException;
+import com.mongodb.MongoQueryException;
+import com.mongodb.MongoSocketException;
+import com.mongodb.MongoTimeoutException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.RequestContext;
+import com.mongodb.ServerApi;
+import com.mongodb.TransactionOptions;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.ChangeStreamIterable;
+import com.mongodb.client.ClientSession;
+import com.mongodb.client.ListDatabasesIterable;
+import com.mongodb.client.MongoCluster;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.MongoIterable;
+import com.mongodb.client.SynchronousContextProvider;
+import com.mongodb.internal.IgnorableRequestContext;
+import com.mongodb.internal.TimeoutSettings;
+import com.mongodb.internal.binding.ClusterAwareReadWriteBinding;
+import com.mongodb.internal.binding.ClusterBinding;
+import com.mongodb.internal.binding.ReadBinding;
+import com.mongodb.internal.binding.ReadWriteBinding;
+import com.mongodb.internal.binding.WriteBinding;
+import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.internal.connection.Cluster;
+import com.mongodb.internal.connection.OperationContext;
+import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext;
+import com.mongodb.internal.operation.ReadOperation;
+import com.mongodb.internal.operation.WriteOperation;
+import com.mongodb.internal.session.ServerSessionPool;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.UuidRepresentation;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL;
+import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL;
+import static com.mongodb.ReadPreference.primary;
+import static com.mongodb.assertions.Assertions.isTrue;
+import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutContext.createTimeoutContext;
+
+final class MongoClusterImpl implements MongoCluster {
+    @Nullable
+    private final AutoEncryptionSettings autoEncryptionSettings;
+    private final Cluster cluster;
+    private final CodecRegistry codecRegistry;
+    @Nullable
+    private final SynchronousContextProvider contextProvider;
+    @Nullable
+    private final Crypt crypt;
+    private final Object originator;
+    private final OperationExecutor operationExecutor;
+    private final ReadConcern readConcern;
+    private final ReadPreference readPreference;
+    private final boolean retryReads;
+    private final boolean retryWrites;
+    @Nullable
+    private final ServerApi serverApi;
+    private final ServerSessionPool serverSessionPool;
+    private final TimeoutSettings timeoutSettings;
+    private final UuidRepresentation uuidRepresentation;
+    private final WriteConcern writeConcern;
+
+    MongoClusterImpl(
+            @Nullable final AutoEncryptionSettings autoEncryptionSettings, final Cluster cluster, final CodecRegistry codecRegistry,
+            @Nullable final SynchronousContextProvider contextProvider, @Nullable final Crypt crypt, final Object originator,
+            @Nullable final OperationExecutor operationExecutor, final ReadConcern readConcern, final ReadPreference readPreference,
+            final boolean retryReads, final boolean retryWrites, @Nullable final ServerApi serverApi,
+            final ServerSessionPool serverSessionPool, final TimeoutSettings timeoutSettings, final UuidRepresentation uuidRepresentation,
+            final WriteConcern writeConcern) {
+        this.autoEncryptionSettings = autoEncryptionSettings;
+        this.cluster = cluster;
+        this.codecRegistry = codecRegistry;
+        this.contextProvider = contextProvider;
+        this.crypt = crypt;
+        this.originator = originator;
+        this.operationExecutor = operationExecutor != null ? operationExecutor : new OperationExecutorImpl(timeoutSettings);
+        this.readConcern = readConcern;
+        this.readPreference = readPreference;
+        this.retryReads = retryReads;
+        this.retryWrites = retryWrites;
+        this.serverApi = serverApi;
+        this.serverSessionPool = serverSessionPool;
+        this.timeoutSettings = timeoutSettings;
+        this.uuidRepresentation = uuidRepresentation;
+        this.writeConcern = writeConcern;
+    }
+
+    @Override
+    public CodecRegistry getCodecRegistry() {
+        return codecRegistry;
+    }
+
+    @Override
+    public ReadPreference getReadPreference() {
+        return readPreference;
+    }
+
+    @Override
+    public WriteConcern getWriteConcern() {
+        return writeConcern;
+    }
+
+    @Override
+    public ReadConcern getReadConcern() {
+        return readConcern;
+    }
+
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        Long timeoutMS = timeoutSettings.getTimeoutMS();
+        return timeoutMS == null ? null : timeUnit.convert(timeoutMS, TimeUnit.MILLISECONDS);
+    }
+
+    @Override
+    public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) {
+        return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator,
+                operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings,
+                uuidRepresentation, writeConcern);
+    }
+
+    @Override
+    public MongoCluster withReadPreference(final ReadPreference readPreference) {
+        return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator,
+                operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings,
+                uuidRepresentation, writeConcern);
+    }
+
+    @Override
+    public MongoCluster withWriteConcern(final WriteConcern writeConcern) {
+        return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator,
+                operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings,
+                uuidRepresentation, writeConcern);
+    }
+
+    @Override
+    public MongoCluster withReadConcern(final ReadConcern readConcern) {
+        return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator,
+                operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings,
+                uuidRepresentation, writeConcern);
+    }
+
+    @Override
+    public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator,
+                operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool,
+                timeoutSettings.withTimeout(timeout, timeUnit), uuidRepresentation, writeConcern);
+    }
+
+    @Override
+    public MongoDatabase getDatabase(final String databaseName) {
+        return new MongoDatabaseImpl(databaseName, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern,
+                uuidRepresentation, autoEncryptionSettings, timeoutSettings, operationExecutor);
+    }
+
+    public Cluster getCluster() {
+        return cluster;
+    }
+
+    @Nullable
+    public Crypt getCrypt() {
+        return crypt;
+    }
+
+    public OperationExecutor getOperationExecutor() {
+        return operationExecutor;
+    }
+
+    public ServerSessionPool getServerSessionPool() {
+        return serverSessionPool;
+    }
+
+    public TimeoutSettings getTimeoutSettings() {
+        return timeoutSettings;
+    }
+
+    @Override
+    public ClientSession startSession() {
+        return startSession(ClientSessionOptions
+                .builder()
+                .defaultTransactionOptions(TransactionOptions.builder()
+                        .readConcern(readConcern)
+                        .writeConcern(writeConcern)
+                        .build())
+                .build());
+    }
+
+    @Override
+    public ClientSession startSession(final ClientSessionOptions options) {
+        notNull("options", options);
+
+        ClientSessionOptions mergedOptions = ClientSessionOptions.builder(options)
+                .defaultTransactionOptions(
+                        TransactionOptions.merge(
+                                options.getDefaultTransactionOptions(),
+                                TransactionOptions.builder()
+                                        .readConcern(readConcern)
+                                        .writeConcern(writeConcern)
+                                        .readPreference(readPreference)
+                                        .build()))
+                .build();
+        return new ClientSessionImpl(serverSessionPool, originator, mergedOptions, operationExecutor);
+    }
+
+    @Override
+    public MongoIterable<String> listDatabaseNames() {
+        return createListDatabaseNamesIterable(null);
+    }
+
+    @Override
+    public MongoIterable<String> listDatabaseNames(final ClientSession clientSession) {
+        notNull("clientSession", clientSession);
+        return createListDatabaseNamesIterable(clientSession);
+    }
+
+    @Override
+    public ListDatabasesIterable<Document> listDatabases() {
+        return listDatabases(Document.class);
+    }
+
+    @Override
+    public ListDatabasesIterable<Document> listDatabases(final ClientSession clientSession) {
+        return listDatabases(clientSession, Document.class);
+    }
+
+    @Override
+    public <TResult> ListDatabasesIterable<TResult> listDatabases(final Class<TResult> clazz) {
+        return createListDatabasesIterable(null, clazz);
+    }
+
+    @Override
+    public <TResult> ListDatabasesIterable<TResult> listDatabases(final ClientSession clientSession, final Class<TResult> clazz) {
+        notNull("clientSession", clientSession);
+        return createListDatabasesIterable(clientSession, clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch() {
+        return watch(Collections.emptyList());
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final Class<TResult> clazz) {
+        return watch(Collections.emptyList(), clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final List<? extends Bson> pipeline) {
+        return watch(pipeline, Document.class);
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final List<? extends Bson> pipeline, final Class<TResult> clazz) {
+        return createChangeStreamIterable(null, pipeline, clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession) {
+        return watch(clientSession, Collections.emptyList());
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final Class<TResult> clazz) {
+        return watch(clientSession, Collections.emptyList(), clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
+        return watch(clientSession, pipeline, Document.class);
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final List<? extends Bson> pipeline,
+            final Class<TResult> clazz) {
+        notNull("clientSession", clientSession);
+        return createChangeStreamIterable(clientSession, pipeline, clazz);
+    }
+
+    private <T> ListDatabasesIterable<T> createListDatabasesIterable(@Nullable final ClientSession clientSession, final Class<T> clazz) {
+        return new ListDatabasesIterableImpl<>(clientSession, clazz, codecRegistry, ReadPreference.primary(), operationExecutor, retryReads, timeoutSettings);
+    }
+
+    private MongoIterable<String> createListDatabaseNamesIterable(@Nullable final ClientSession clientSession) {
+        return createListDatabasesIterable(clientSession, BsonDocument.class)
+                .nameOnly(true)
+                .map(result -> result.getString("name").getValue());
+    }
+
+    private <TResult> ChangeStreamIterable<TResult> createChangeStreamIterable(@Nullable final ClientSession clientSession,
+            final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return new ChangeStreamIterableImpl<>(clientSession, "admin", codecRegistry, readPreference,
+                readConcern, operationExecutor, pipeline, resultClass, ChangeStreamLevel.CLIENT,
+                retryReads, timeoutSettings);
+    }
+
+    final class OperationExecutorImpl implements OperationExecutor {
+        private final TimeoutSettings executorTimeoutSettings;
+
+        OperationExecutorImpl(final TimeoutSettings executorTimeoutSettings) {
+            this.executorTimeoutSettings = executorTimeoutSettings;
+        }
+
+        @Override
+        public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern) {
+            return execute(operation, readPreference, readConcern, null);
+        }
+
+        @Override
+        public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern) {
+            return execute(operation, readConcern, null);
+        }
+
+        @Override
+        public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern,
+                @Nullable final ClientSession session) {
+            if (session != null) {
+                session.notifyOperationInitiated(operation);
+            }
+
+            ClientSession actualClientSession = getClientSession(session);
+            ReadBinding binding = getReadBinding(readPreference, readConcern, actualClientSession, session == null);
+
+            try {
+                if (actualClientSession.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) {
+                    throw new MongoClientException("Read preference in a transaction must be primary");
+                }
+                return operation.execute(binding);
+            } catch (MongoException e) {
+                labelException(actualClientSession, e);
+                clearTransactionContextOnTransientTransactionError(session, e);
+                throw e;
+            } finally {
+                binding.release();
+            }
+        }
+
+        @Override
+        public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern,
+                @Nullable final ClientSession session) {
+            if (session != null) {
+                session.notifyOperationInitiated(operation);
+            }
+
+            ClientSession actualClientSession = getClientSession(session);
+            WriteBinding binding = getWriteBinding(readConcern, actualClientSession, session == null);
+
+            try {
+                return operation.execute(binding);
+            } catch (MongoException e) {
+                labelException(actualClientSession, e);
+                clearTransactionContextOnTransientTransactionError(session, e);
+                throw e;
+            } finally {
+                binding.release();
+            }
+        }
+
+        @Override
+        public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) {
+            if (Objects.equals(executorTimeoutSettings, newTimeoutSettings)) {
+                return this;
+            }
+            return new OperationExecutorImpl(newTimeoutSettings);
+        }
+
+        @Override
+        public TimeoutSettings getTimeoutSettings() {
+            return executorTimeoutSettings;
+        }
+
+        WriteBinding getWriteBinding(final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) {
+            return getReadWriteBinding(primary(), readConcern, session, ownsSession);
+        }
+
+        ReadBinding getReadBinding(final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session,
+                final boolean ownsSession) {
+            return getReadWriteBinding(readPreference, readConcern, session, ownsSession);
+        }
+
+        ReadWriteBinding getReadWriteBinding(final ReadPreference readPreference,
+                final ReadConcern readConcern, final ClientSession session, final boolean ownsSession) {
+
+            ClusterAwareReadWriteBinding readWriteBinding = new ClusterBinding(cluster,
+                    getReadPreferenceForBinding(readPreference, session), readConcern, getOperationContext(session, readConcern));
+
+            if (crypt != null) {
+                readWriteBinding = new CryptBinding(readWriteBinding, crypt);
+            }
+
+            return new ClientSessionBinding(session, ownsSession, readWriteBinding);
+        }
+
+        private OperationContext getOperationContext(final ClientSession session, final ReadConcern readConcern) {
+            return new OperationContext(
+                    getRequestContext(),
+                    new ReadConcernAwareNoOpSessionContext(readConcern),
+                    createTimeoutContext(session, executorTimeoutSettings),
+                    serverApi);
+        }
+
+        private RequestContext getRequestContext() {
+            RequestContext context = null;
+            if (contextProvider != null) {
+                context = contextProvider.getContext();
+            }
+            return context == null ? IgnorableRequestContext.INSTANCE : context;
+        }
+
+        private void labelException(final ClientSession session, final MongoException e) {
+            if (session.hasActiveTransaction() && (e instanceof MongoSocketException || e instanceof MongoTimeoutException
+                    || e instanceof MongoQueryException && e.getCode() == 91)
+                    && !e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) {
+                e.addLabel(TRANSIENT_TRANSACTION_ERROR_LABEL);
+            }
+        }
+
+        private void clearTransactionContextOnTransientTransactionError(@Nullable final ClientSession session, final MongoException e) {
+            if (session != null && e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)) {
+                session.clearTransactionContext();
+            }
+        }
+
+        private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) {
+            if (session == null) {
+                return readPreference;
+            }
+            if (session.hasActiveTransaction()) {
+                ReadPreference readPreferenceForBinding = session.getTransactionOptions().getReadPreference();
+                if (readPreferenceForBinding == null) {
+                    throw new MongoInternalException("Invariant violated.  Transaction options read preference can not be null");
+                }
+                return readPreferenceForBinding;
+            }
+            return readPreference;
+        }
+
+        ClientSession getClientSession(@Nullable final ClientSession clientSessionFromOperation) {
+            ClientSession session;
+            if (clientSessionFromOperation != null) {
+                isTrue("ClientSession from same MongoClient", clientSessionFromOperation.getOriginator() == originator);
+                session = clientSessionFromOperation;
+            } else {
+                session = startSession(ClientSessionOptions.builder()
+                        .causallyConsistent(false)
+                        .defaultTransactionOptions(
+                                TransactionOptions.builder()
+                                        .readConcern(ReadConcern.DEFAULT)
+                                        .readPreference(ReadPreference.primary())
+                                        .writeConcern(WriteConcern.ACKNOWLEDGED).build())
+                        .build());
+            }
+            return session;
+        }
+    }
+}
diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java
index 2dca4baf3eb..8466950d7e5 100755
--- a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java
@@ -59,11 +59,11 @@
 import com.mongodb.client.result.InsertManyResult;
 import com.mongodb.client.result.InsertOneResult;
 import com.mongodb.client.result.UpdateResult;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.bulk.WriteRequest;
 import com.mongodb.internal.client.model.AggregationLevel;
 import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
 import com.mongodb.internal.operation.IndexHelper;
-import com.mongodb.internal.operation.RenameCollectionOperation;
 import com.mongodb.internal.operation.SyncOperations;
 import com.mongodb.internal.operation.WriteOperation;
 import com.mongodb.lang.Nullable;
@@ -77,6 +77,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.assertions.Assertions.notNull;
 import static com.mongodb.assertions.Assertions.notNullElements;
@@ -85,6 +86,7 @@
 import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE;
 import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE;
 import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation;
 
 class MongoCollectionImpl<TDocument> implements MongoCollection<TDocument> {
@@ -100,12 +102,15 @@ class MongoCollectionImpl<TDocument> implements MongoCollection<TDocument> {
     private final UuidRepresentation uuidRepresentation;
     @Nullable
     private final AutoEncryptionSettings autoEncryptionSettings;
+
+    private final TimeoutSettings timeoutSettings;
     private final OperationExecutor executor;
 
     MongoCollectionImpl(final MongoNamespace namespace, final Class<TDocument> documentClass, final CodecRegistry codecRegistry,
-                        final ReadPreference readPreference, final WriteConcern writeConcern, final boolean retryWrites,
-                        final boolean retryReads, final ReadConcern readConcern, final UuidRepresentation uuidRepresentation,
-                        @Nullable final AutoEncryptionSettings autoEncryptionSettings, final OperationExecutor executor) {
+            final ReadPreference readPreference, final WriteConcern writeConcern, final boolean retryWrites,
+            final boolean retryReads, final ReadConcern readConcern, final UuidRepresentation uuidRepresentation,
+            @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings,
+            final OperationExecutor executor) {
         this.namespace = notNull("namespace", namespace);
         this.documentClass = notNull("documentClass", documentClass);
         this.codecRegistry = notNull("codecRegistry", codecRegistry);
@@ -117,8 +122,9 @@ class MongoCollectionImpl<TDocument> implements MongoCollection<TDocument> {
         this.executor = notNull("executor", executor);
         this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation);
         this.autoEncryptionSettings = autoEncryptionSettings;
+        this.timeoutSettings = timeoutSettings;
         this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern,
-                retryWrites, retryReads);
+                retryWrites, retryReads, timeoutSettings);
     }
 
     @Override
@@ -151,34 +157,46 @@ public ReadConcern getReadConcern() {
         return readConcern;
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        Long timeoutMS = timeoutSettings.getTimeoutMS();
+        return timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS);
+    }
+
     @Override
     public <NewTDocument> MongoCollection<NewTDocument> withDocumentClass(final Class<NewTDocument> clazz) {
         return new MongoCollectionImpl<>(namespace, clazz, codecRegistry, readPreference, writeConcern, retryWrites,
-                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor);
+                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
     public MongoCollection<TDocument> withCodecRegistry(final CodecRegistry codecRegistry) {
         return new MongoCollectionImpl<>(namespace, documentClass, withUuidRepresentation(codecRegistry, uuidRepresentation),
-                readPreference, writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor);
+                readPreference, writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
     public MongoCollection<TDocument> withReadPreference(final ReadPreference readPreference) {
         return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites,
-                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor);
+                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
     public MongoCollection<TDocument> withWriteConcern(final WriteConcern writeConcern) {
         return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites,
-                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor);
+                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
     public MongoCollection<TDocument> withReadConcern(final ReadConcern readConcern) {
         return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites,
-                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor);
+                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
+    }
+
+    @Override
+    public MongoCollection<TDocument> withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, retryReads,
+                readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings.withTimeout(timeout, timeUnit), executor);
     }
 
     @Override
@@ -219,11 +237,13 @@ public long estimatedDocumentCount() {
 
     @Override
     public long estimatedDocumentCount(final EstimatedDocumentCountOptions options) {
-        return executor.execute(operations.estimatedDocumentCount(options), readPreference, readConcern, null);
+        return getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.estimatedDocumentCount(options), readPreference, readConcern, null);
     }
 
     private long executeCount(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) {
-        return executor.execute(operations.countDocuments(filter, options), readPreference, readConcern, clientSession);
+        return getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.countDocuments(filter, options), readPreference, readConcern, clientSession);
     }
 
     @Override
@@ -252,7 +272,7 @@ public <TResult> DistinctIterable<TResult> distinct(final ClientSession clientSe
     private <TResult> DistinctIterable<TResult> createDistinctIterable(@Nullable final ClientSession clientSession, final String fieldName,
                                                                        final Bson filter, final Class<TResult> resultClass) {
         return new DistinctIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry,
-                readPreference, readConcern, executor, fieldName, filter, retryReads);
+                readPreference, readConcern, executor, fieldName, filter, retryReads, timeoutSettings);
     }
 
     @Override
@@ -303,7 +323,7 @@ public <TResult> FindIterable<TResult> find(final ClientSession clientSession, f
     private <TResult> FindIterable<TResult> createFindIterable(@Nullable final ClientSession clientSession, final Bson filter,
                                                                final Class<TResult> resultClass) {
         return new FindIterableImpl<>(clientSession, namespace, this.documentClass, resultClass, codecRegistry,
-                readPreference, readConcern, executor, filter, retryReads);
+                readPreference, readConcern, executor, filter, retryReads, timeoutSettings);
     }
 
     @Override
@@ -332,7 +352,7 @@ private <TResult> AggregateIterable<TResult> createAggregateIterable(@Nullable f
                                                                          final List<? extends Bson> pipeline,
                                                                          final Class<TResult> resultClass) {
         return new AggregateIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry,
-                readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, retryReads);
+                readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, retryReads, timeoutSettings);
     }
 
     @Override
@@ -381,7 +401,7 @@ private <TResult> ChangeStreamIterable<TResult> createChangeStreamIterable(@Null
                                                                                final List<? extends Bson> pipeline,
                                                                                final Class<TResult> resultClass) {
         return new ChangeStreamIterableImpl<>(clientSession, namespace, codecRegistry, readPreference, readConcern, executor,
-                pipeline, resultClass, ChangeStreamLevel.COLLECTION, retryReads);
+                pipeline, resultClass, ChangeStreamLevel.COLLECTION, retryReads, timeoutSettings);
     }
 
     @SuppressWarnings("deprecation")
@@ -417,7 +437,7 @@ private <TResult> com.mongodb.client.MapReduceIterable<TResult> createMapReduceI
                                                                          final String mapFunction, final String reduceFunction,
                                                                          final Class<TResult> resultClass) {
         return new MapReduceIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry,
-                readPreference, readConcern, writeConcern, executor, mapFunction, reduceFunction);
+                readPreference, readConcern, writeConcern, executor, mapFunction, reduceFunction, timeoutSettings);
     }
 
     @Override
@@ -446,7 +466,8 @@ private BulkWriteResult executeBulkWrite(@Nullable final ClientSession clientSes
                                              final List<? extends WriteModel<? extends TDocument>> requests,
                                              final BulkWriteOptions options) {
         notNull("requests", requests);
-        return executor.execute(operations.bulkWrite(requests, options), readConcern, clientSession);
+        return getExecutor(timeoutSettings)
+                .execute(operations.bulkWrite(requests, options), readConcern, clientSession);
     }
 
     @Override
@@ -501,8 +522,10 @@ public InsertManyResult insertMany(final ClientSession clientSession, final List
     }
 
     private InsertManyResult executeInsertMany(@Nullable final ClientSession clientSession, final List<? extends TDocument> documents,
-                                   final InsertManyOptions options) {
-        return toInsertManyResult(executor.execute(operations.insertMany(documents, options), readConcern, clientSession));
+                                                final InsertManyOptions options) {
+        return toInsertManyResult(
+                getExecutor(timeoutSettings).execute(operations.insertMany(documents, options), readConcern, clientSession)
+        );
     }
 
     @Override
@@ -693,7 +716,8 @@ public TDocument findOneAndDelete(final ClientSession clientSession, final Bson
     @Nullable
     private TDocument executeFindOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter,
                                               final FindOneAndDeleteOptions options) {
-        return executor.execute(operations.findOneAndDelete(filter, options), readConcern, clientSession);
+        return getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.findOneAndDelete(filter, options), readConcern, clientSession);
     }
 
     @Override
@@ -725,7 +749,8 @@ public TDocument findOneAndReplace(final ClientSession clientSession, final Bson
     @Nullable
     private TDocument executeFindOneAndReplace(@Nullable final ClientSession clientSession, final Bson filter, final TDocument replacement,
                                                final FindOneAndReplaceOptions options) {
-        return executor.execute(operations.findOneAndReplace(filter, replacement, options), readConcern, clientSession);
+        return getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.findOneAndReplace(filter, replacement, options), readConcern, clientSession);
     }
 
     @Override
@@ -757,7 +782,8 @@ public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson
     @Nullable
     private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter, final Bson update,
                                               final FindOneAndUpdateOptions options) {
-        return executor.execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession);
+        return getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession);
     }
 
     @Override
@@ -789,7 +815,8 @@ public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson
     @Nullable
     private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter,
                                               final List<? extends Bson> update, final FindOneAndUpdateOptions options) {
-        return executor.execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession);
+        return getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession);
     }
 
     @Override
@@ -840,14 +867,14 @@ public void updateSearchIndex(final String indexName, final Bson definition) {
         notNull("indexName", indexName);
         notNull("definition", definition);
 
-        executor.execute(operations.updateSearchIndex(indexName, definition), readConcern, null);
+        getExecutor(timeoutSettings).execute(operations.updateSearchIndex(indexName, definition), readConcern, null);
     }
 
     @Override
     public void dropSearchIndex(final String indexName) {
         notNull("indexName", indexName);
 
-        executor.execute(operations.dropSearchIndex(indexName), readConcern, null);
+        getExecutor(timeoutSettings).execute(operations.dropSearchIndex(indexName), readConcern, null);
     }
 
     @Override
@@ -862,7 +889,8 @@ public <TResult> ListSearchIndexesIterable<TResult> listSearchIndexes(final Clas
     }
 
     private void executeDrop(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) {
-        executor.execute(operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), readConcern, clientSession);
+        getExecutor(timeoutSettings)
+                .execute(operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), readConcern, clientSession);
     }
 
     @Override
@@ -909,12 +937,13 @@ public List<String> createIndexes(final ClientSession clientSession, final List<
 
     private List<String> executeCreateIndexes(@Nullable final ClientSession clientSession, final List<IndexModel> indexes,
                                               final CreateIndexOptions createIndexOptions) {
-        executor.execute(operations.createIndexes(indexes, createIndexOptions), readConcern, clientSession);
+        getExecutor(operations.createTimeoutSettings(createIndexOptions))
+                .execute(operations.createIndexes(indexes, createIndexOptions), readConcern, clientSession);
         return IndexHelper.getIndexNames(indexes, codecRegistry);
     }
 
     private List<String> executeCreateSearchIndexes(final List<SearchIndexModel> searchIndexModels) {
-        executor.execute(operations.createSearchIndexes(searchIndexModels), readConcern, null);
+        getExecutor(timeoutSettings).execute(operations.createSearchIndexes(searchIndexModels), readConcern, null);
         return IndexHelper.getSearchIndexNames(searchIndexModels);
     }
 
@@ -942,12 +971,12 @@ public <TResult> ListIndexesIterable<TResult> listIndexes(final ClientSession cl
     private <TResult> ListIndexesIterable<TResult> createListIndexesIterable(@Nullable final ClientSession clientSession,
                                                                              final Class<TResult> resultClass) {
         return new ListIndexesIterableImpl<>(clientSession, getNamespace(), resultClass, codecRegistry, ReadPreference.primary(),
-                executor, retryReads);
+                executor, retryReads, timeoutSettings);
     }
 
     private <TResult> ListSearchIndexesIterable<TResult> createListSearchIndexesIterable(final Class<TResult> resultClass) {
-        return new ListSearchIndexesIterableImpl<>(getNamespace(), executor,
-                resultClass, codecRegistry, readPreference, retryReads);
+        return new ListSearchIndexesIterableImpl<>(getNamespace(), executor, resultClass, codecRegistry, readPreference,
+                retryReads, timeoutSettings);
     }
 
     @Override
@@ -1014,13 +1043,16 @@ public void dropIndexes(final ClientSession clientSession, final DropIndexOption
     }
 
     private void executeDropIndex(@Nullable final ClientSession clientSession, final String indexName,
-                                  final DropIndexOptions dropIndexOptions) {
-        notNull("dropIndexOptions", dropIndexOptions);
-        executor.execute(operations.dropIndex(indexName, dropIndexOptions), readConcern, clientSession);
+                                  final DropIndexOptions options) {
+        notNull("options", options);
+        getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.dropIndex(indexName, options), readConcern, clientSession);
     }
 
-    private void executeDropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions dropIndexOptions) {
-        executor.execute(operations.dropIndex(keys, dropIndexOptions), readConcern, clientSession);
+    private void executeDropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) {
+        notNull("options", options);
+        getExecutor(operations.createTimeoutSettings(options))
+                .execute(operations.dropIndex(keys, options), readConcern, clientSession);
     }
 
     @Override
@@ -1047,9 +1079,8 @@ public void renameCollection(final ClientSession clientSession, final MongoNames
 
     private void executeRenameCollection(@Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace,
                                          final RenameCollectionOptions renameCollectionOptions) {
-        executor.execute(new RenameCollectionOperation(getNamespace(), newCollectionNamespace, writeConcern)
-                        .dropTarget(renameCollectionOptions.isDropTarget()),
-                readConcern, clientSession);
+        getExecutor(timeoutSettings)
+                .execute(operations.renameCollection(newCollectionNamespace, renameCollectionOptions), readConcern, clientSession);
     }
 
     private DeleteResult executeDelete(@Nullable final ClientSession clientSession, final Bson filter, final DeleteOptions deleteOptions,
@@ -1081,7 +1112,8 @@ private BulkWriteResult executeSingleWriteRequest(@Nullable final ClientSession
                                                       final WriteOperation<BulkWriteResult> writeOperation,
                                                       final WriteRequest.Type type) {
         try {
-            return executor.execute(writeOperation, readConcern, clientSession);
+            return getExecutor(timeoutSettings)
+                    .execute(writeOperation, readConcern, clientSession);
         } catch (MongoBulkWriteException e) {
             if (e.getWriteErrors().isEmpty()) {
                 throw new MongoWriteConcernException(e.getWriteConcernError(),
@@ -1138,4 +1170,8 @@ private UpdateResult toUpdateResult(final com.mongodb.bulk.BulkWriteResult resul
         }
     }
 
+    private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) {
+        return executor.withTimeoutSettings(timeoutSettings);
+    }
+
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java
index 283f118af6b..b2b3284980d 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java
@@ -31,6 +31,7 @@
 import com.mongodb.client.MongoDatabase;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.CreateViewOptions;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.client.model.AggregationLevel;
 import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
 import com.mongodb.internal.operation.SyncOperations;
@@ -43,10 +44,12 @@
 
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.MongoNamespace.COMMAND_COLLECTION_NAME;
 import static com.mongodb.MongoNamespace.checkDatabaseNameValidity;
 import static com.mongodb.assertions.Assertions.notNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation;
 
 /**
@@ -60,16 +63,19 @@ public class MongoDatabaseImpl implements MongoDatabase {
     private final boolean retryWrites;
     private final boolean retryReads;
     private final ReadConcern readConcern;
+    private final UuidRepresentation uuidRepresentation;
     @Nullable
     private final AutoEncryptionSettings autoEncryptionSettings;
+
+    private final TimeoutSettings timeoutSettings;
     private final OperationExecutor executor;
-    private final UuidRepresentation uuidRepresentation;
     private final SyncOperations<BsonDocument> operations;
 
     public MongoDatabaseImpl(final String name, final CodecRegistry codecRegistry, final ReadPreference readPreference,
-                             final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads,
-                             final ReadConcern readConcern, final UuidRepresentation uuidRepresentation,
-                             @Nullable final AutoEncryptionSettings autoEncryptionSettings, final OperationExecutor executor) {
+            final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads,
+            final ReadConcern readConcern, final UuidRepresentation uuidRepresentation,
+            @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings,
+            final OperationExecutor executor) {
         checkDatabaseNameValidity(name);
         this.name = notNull("name", name);
         this.codecRegistry = notNull("codecRegistry", codecRegistry);
@@ -80,9 +86,10 @@ public MongoDatabaseImpl(final String name, final CodecRegistry codecRegistry, f
         this.readConcern = notNull("readConcern", readConcern);
         this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation);
         this.autoEncryptionSettings = autoEncryptionSettings;
+        this.timeoutSettings = timeoutSettings;
         this.executor = notNull("executor", executor);
         this.operations = new SyncOperations<>(new MongoNamespace(name, COMMAND_COLLECTION_NAME), BsonDocument.class, readPreference,
-                codecRegistry, readConcern, writeConcern, retryWrites, retryReads);
+                codecRegistry, readConcern, writeConcern, retryWrites, retryReads, timeoutSettings);
     }
 
     @Override
@@ -110,28 +117,40 @@ public ReadConcern getReadConcern() {
         return readConcern;
     }
 
+    @Override
+    public Long getTimeout(final TimeUnit timeUnit) {
+        Long timeoutMS = timeoutSettings.getTimeoutMS();
+        return timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS);
+    }
+
     @Override
     public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) {
         return new MongoDatabaseImpl(name, withUuidRepresentation(codecRegistry, uuidRepresentation), readPreference, writeConcern, retryWrites,
-                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor);
+                retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
     public MongoDatabase withReadPreference(final ReadPreference readPreference) {
         return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern,
-                uuidRepresentation, autoEncryptionSettings, executor);
+                uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
     public MongoDatabase withWriteConcern(final WriteConcern writeConcern) {
         return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern,
-                uuidRepresentation, autoEncryptionSettings, executor);
+                uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
     public MongoDatabase withReadConcern(final ReadConcern readConcern) {
         return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern,
-                uuidRepresentation, autoEncryptionSettings, executor);
+                uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
+    }
+
+    @Override
+    public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) {
+        return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern,
+                uuidRepresentation, autoEncryptionSettings, timeoutSettings.withTimeout(timeout, timeUnit), executor);
     }
 
     @Override
@@ -142,7 +161,7 @@ public MongoCollection<Document> getCollection(final String collectionName) {
     @Override
     public <TDocument> MongoCollection<TDocument> getCollection(final String collectionName, final Class<TDocument> documentClass) {
         return new MongoCollectionImpl<>(new MongoNamespace(name, collectionName), documentClass, codecRegistry, readPreference,
-                writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor);
+                writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor);
     }
 
     @Override
@@ -193,7 +212,7 @@ private <TResult> TResult executeCommand(@Nullable final ClientSession clientSes
         if (clientSession != null && clientSession.hasActiveTransaction() && !readPreference.equals(ReadPreference.primary())) {
             throw new MongoClientException("Read preference in a transaction must be primary");
         }
-        return executor.execute(operations.commandRead(command, resultClass), readPreference, readConcern, clientSession);
+        return getExecutor().execute(operations.commandRead(command, resultClass), readPreference, readConcern, clientSession);
     }
 
     @Override
@@ -208,7 +227,7 @@ public void drop(final ClientSession clientSession) {
     }
 
     private void executeDrop(@Nullable final ClientSession clientSession) {
-        executor.execute(operations.dropDatabase(), readConcern, clientSession);
+        getExecutor().execute(operations.dropDatabase(), readConcern, clientSession);
     }
 
     @Override
@@ -251,7 +270,7 @@ private <TResult> ListCollectionsIterableImpl<TResult> createListCollectionsIter
                                                                                      final Class<TResult> resultClass,
                                                                                      final boolean collectionNamesOnly) {
         return new ListCollectionsIterableImpl<>(clientSession, name, collectionNamesOnly, resultClass, codecRegistry,
-                ReadPreference.primary(), executor, retryReads);
+                ReadPreference.primary(), executor, retryReads, timeoutSettings);
     }
 
     @Override
@@ -278,8 +297,8 @@ public void createCollection(final ClientSession clientSession, final String col
 
     private void executeCreateCollection(@Nullable final ClientSession clientSession, final String collectionName,
                                          final CreateCollectionOptions createCollectionOptions) {
-        executor.execute(operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings), readConcern,
-                clientSession);
+        getExecutor().execute(operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings),
+                        readConcern, clientSession);
     }
 
     @Override
@@ -374,19 +393,23 @@ private <TResult> AggregateIterable<TResult> createAggregateIterable(@Nullable f
                                                                          final List<? extends Bson> pipeline,
                                                                          final Class<TResult> resultClass) {
         return new AggregateIterableImpl<>(clientSession, name, Document.class, resultClass, codecRegistry,
-                readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, retryReads);
+                readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, retryReads, timeoutSettings);
     }
 
     private <TResult> ChangeStreamIterable<TResult> createChangeStreamIterable(@Nullable final ClientSession clientSession,
                                                                                final List<? extends Bson> pipeline,
                                                                                final Class<TResult> resultClass) {
         return new ChangeStreamIterableImpl<>(clientSession, name, codecRegistry, readPreference, readConcern, executor,
-                pipeline, resultClass, ChangeStreamLevel.DATABASE, retryReads);
+                pipeline, resultClass, ChangeStreamLevel.DATABASE, retryReads, timeoutSettings);
     }
 
     private void executeCreateView(@Nullable final ClientSession clientSession, final String viewName, final String viewOn,
                                    final List<? extends Bson> pipeline, final CreateViewOptions createViewOptions) {
         notNull("createViewOptions", createViewOptions);
-        executor.execute(operations.createView(viewName, viewOn, pipeline, createViewOptions), readConcern, clientSession);
+        getExecutor().execute(operations.createView(viewName, viewOn, pipeline, createViewOptions), readConcern, clientSession);
+    }
+
+    private OperationExecutor getExecutor() {
+        return executor.withTimeoutSettings(timeoutSettings);
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java
index 86c2e7b99eb..d4b948c07a1 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java
@@ -22,13 +22,17 @@
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.MongoCursor;
 import com.mongodb.client.MongoIterable;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.BatchCursor;
 import com.mongodb.internal.operation.ReadOperation;
 import com.mongodb.lang.Nullable;
 
 import java.util.Collection;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 
+import static com.mongodb.assertions.Assertions.isTrueArgument;
 import static com.mongodb.assertions.Assertions.notNull;
 
 /**
@@ -40,15 +44,18 @@ public abstract class MongoIterableImpl<TResult> implements MongoIterable<TResul
     private final OperationExecutor executor;
     private final ReadPreference readPreference;
     private final boolean retryReads;
+    private final TimeoutSettings timeoutSettings;
     private Integer batchSize;
+    private TimeoutMode timeoutMode;
 
     public MongoIterableImpl(@Nullable final ClientSession clientSession, final OperationExecutor executor, final ReadConcern readConcern,
-                             final ReadPreference readPreference, final boolean retryReads) {
+                             final ReadPreference readPreference, final boolean retryReads, final TimeoutSettings timeoutSettings) {
         this.clientSession = clientSession;
         this.executor = notNull("executor", executor);
         this.readConcern = notNull("readConcern", readConcern);
         this.readPreference = notNull("readPreference", readPreference);
-        this.retryReads = notNull("retryReads", retryReads);
+        this.retryReads = retryReads;
+        this.timeoutSettings = timeoutSettings;
     }
 
     public abstract ReadOperation<BatchCursor<TResult>> asReadOperation();
@@ -58,8 +65,10 @@ ClientSession getClientSession() {
         return clientSession;
     }
 
-    OperationExecutor getExecutor() {
-        return executor;
+    protected abstract OperationExecutor getExecutor();
+
+    OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) {
+        return executor.withTimeoutSettings(timeoutSettings);
     }
 
     ReadPreference getReadPreference() {
@@ -74,6 +83,10 @@ protected boolean getRetryReads() {
         return retryReads;
     }
 
+    protected TimeoutSettings getTimeoutSettings() {
+        return timeoutSettings;
+    }
+
     @Nullable
     public Integer getBatchSize() {
         return batchSize;
@@ -85,6 +98,19 @@ public MongoIterable<TResult> batchSize(final int batchSize) {
         return this;
     }
 
+    @Nullable
+    public TimeoutMode getTimeoutMode() {
+        return timeoutMode;
+    }
+
+    public MongoIterable<TResult> timeoutMode(final TimeoutMode timeoutMode) {
+        if (timeoutSettings.getTimeoutMS() == null) {
+            throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set.");
+        }
+        this.timeoutMode = timeoutMode;
+        return this;
+    }
+
     @Override
     public MongoCursor<TResult> iterator() {
         return new MongoBatchCursorAdapter<>(execute());
@@ -127,6 +153,18 @@ public <A extends Collection<? super TResult>> A into(final A target) {
     }
 
     private BatchCursor<TResult> execute() {
-        return executor.execute(asReadOperation(), readPreference, readConcern, clientSession);
+        return getExecutor().execute(asReadOperation(), readPreference, readConcern, clientSession);
+    }
+
+    // Converts maxAwaitTime to milliseconds and validates it is less than timeoutMS when timeoutMS is set and non-zero.
+    protected long validateMaxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) {
+        notNull("timeUnit", timeUnit);
+        Long timeoutMS = timeoutSettings.getTimeoutMS();
+        long maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit);
+
+        isTrueArgument("maxAwaitTimeMS must be less than timeoutMS", timeoutMS == null || timeoutMS == 0
+                || timeoutMS > maxAwaitTimeMS);
+
+        return maxAwaitTimeMS;
     }
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java
index 3786dc1ad6f..37df6dffe32 100644
--- a/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java
+++ b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java
@@ -19,6 +19,7 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.client.ClientSession;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.ReadOperation;
 import com.mongodb.internal.operation.WriteOperation;
 import com.mongodb.lang.Nullable;
@@ -33,10 +34,10 @@ public interface OperationExecutor {
     /**
      * Execute the read operation with the given read preference.
      *
-     * @param <T> the operations result type.
-     * @param operation the read operation.
+     * @param <T>            the operations result type.
+     * @param operation      the read operation.
      * @param readPreference the read preference.
-     * @param readConcern the read concern
+     * @param readConcern    the read concern
      * @return the result of executing the operation.
      */
     <T> T execute(ReadOperation<T> operation, ReadPreference readPreference, ReadConcern readConcern);
@@ -44,9 +45,9 @@ public interface OperationExecutor {
     /**
      * Execute the write operation.
      *
-     * @param operation the write operation.
+     * @param <T>         the operations result type.
+     * @param operation   the write operation.
      * @param readConcern the read concern
-     * @param <T> the operations result type.
      * @return the result of executing the operation.
      */
     <T> T execute(WriteOperation<T> operation, ReadConcern readConcern);
@@ -54,11 +55,11 @@ public interface OperationExecutor {
     /**
      * Execute the read operation with the given read preference.
      *
-     * @param <T> the operations result type.
-     * @param operation the read operation.
+     * @param <T>            the operations result type.
+     * @param operation      the read operation.
      * @param readPreference the read preference.
-     * @param readConcern the read concern
-     * @param session the session to associate this operation with
+     * @param readConcern    the read concern
+     * @param session        the session to associate this operation with
      * @return the result of executing the operation.
      */
     <T> T execute(ReadOperation<T> operation, ReadPreference readPreference, ReadConcern readConcern, @Nullable ClientSession session);
@@ -66,11 +67,28 @@ public interface OperationExecutor {
     /**
      * Execute the write operation.
      *
-     * @param operation the write operation.
+     * @param <T>         the operations result type.
+     * @param operation   the write operation.
      * @param readConcern the read concern
-     * @param session the session to associate this operation with
-     * @param <T> the operations result type.
+     * @param session     the session to associate this operation with
      * @return the result of executing the operation.
      */
     <T> T execute(WriteOperation<T> operation, ReadConcern readConcern, @Nullable ClientSession session);
+
+    /**
+     * Creates a new {@code OperationExecutor} with the given timeout settings.
+     *
+     * @param timeoutSettings the timeout settings to use for operations executed by the new executor
+     * @return a new operation executor configured with the given timeout settings
+     * @since 5.2
+     */
+    OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings);
+
+    /**
+     * Returns the current timeout settings
+     *
+     * @return the timeout settings
+     * @since 5.2
+     */
+    TimeoutSettings getTimeoutSettings();
 }
diff --git a/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java b/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java
new file mode 100644
index 00000000000..6a5ef68e615
--- /dev/null
+++ b/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.internal;
+
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+public final class TimeoutHelper {
+    private static final String DEFAULT_TIMEOUT_MESSAGE = "Operation exceeded the timeout limit.";
+
+    private TimeoutHelper() {
+        //NOP
+    }
+
+    public static <T> MongoCollection<T> collectionWithTimeout(final MongoCollection<T> collection,
+                                                               final String message,
+                                                               @Nullable final Timeout timeout) {
+        if (timeout != null) {
+            return timeout.call(MILLISECONDS,
+                    () -> collection.withTimeout(0, MILLISECONDS),
+                    ms -> collection.withTimeout(ms, MILLISECONDS),
+                    () -> TimeoutContext.throwMongoTimeoutException(message));
+        }
+        return collection;
+    }
+
+    public static <T> MongoCollection<T> collectionWithTimeout(final MongoCollection<T> collection,
+                                                               @Nullable final Timeout timeout) {
+        return collectionWithTimeout(collection, DEFAULT_TIMEOUT_MESSAGE, timeout);
+    }
+
+    public static MongoDatabase databaseWithTimeout(final MongoDatabase database,
+                                                    final String message,
+                                                    @Nullable final Timeout timeout) {
+        if (timeout != null) {
+            return timeout.call(MILLISECONDS,
+                    () -> database.withTimeout(0, MILLISECONDS),
+                    ms -> database.withTimeout(ms, MILLISECONDS),
+                    () -> TimeoutContext.throwMongoTimeoutException(message));
+        }
+        return database;
+    }
+
+    public static MongoDatabase databaseWithTimeout(final MongoDatabase database,
+                                                    @Nullable final Timeout timeout) {
+        return databaseWithTimeout(database, DEFAULT_TIMEOUT_MESSAGE, timeout);
+    }
+
+}
diff --git a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java
index 864fdf004dc..6d529741a24 100644
--- a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java
+++ b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java
@@ -19,6 +19,7 @@
 import com.mongodb.AutoEncryptionSettings;
 import com.mongodb.MongoUpdatedEncryptedFieldsException;
 import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
 import com.mongodb.client.FindIterable;
 import com.mongodb.client.MongoDatabase;
 import com.mongodb.client.model.CreateCollectionOptions;
@@ -108,7 +109,7 @@ public interface ClientEncryption extends Closeable {
      * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
      * @mongodb.driver.manual reference/operator/aggregation/match/ $match
      */
-    @Beta(Beta.Reason.SERVER)
+    @Beta(Reason.SERVER)
    BsonDocument encryptExpression(Bson expression, EncryptOptions options);
 
     /**
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java
index ef965f0ae95..2ac985f21a6 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java
@@ -195,11 +195,11 @@ public void shouldPassAllOutcomes(final int maxPoolSize,
     }
 
     private void assertEventEquality(final TestCommandListener commandListener, final List<ExpectedEvent> expectedStartEvents) {
-        List<CommandEvent> actualStartedEvents = commandListener.getCommandStartedEvents();
+        List<CommandStartedEvent> actualStartedEvents = commandListener.getCommandStartedEvents();
         assertEquals(expectedStartEvents.size(), actualStartedEvents.size());
         for (int i = 0; i < expectedStartEvents.size(); i++) {
             ExpectedEvent expectedEvent = expectedStartEvents.get(i);
-            CommandStartedEvent actualEvent = (CommandStartedEvent) actualStartedEvents.get(i);
+            CommandStartedEvent actualEvent = actualStartedEvents.get(i);
             assertEquals(expectedEvent.getDatabase(), actualEvent.getDatabaseName(), "Database name");
             assertEquals(expectedEvent.getCommandName(), actualEvent.getCommandName(), "Command name");
         }
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java
index 64f9568e4ed..25abafc65ee 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionTest.java
@@ -20,12 +20,14 @@
 import com.mongodb.MongoClientSettings;
 import com.mongodb.MongoCommandException;
 import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoWriteConcernException;
 import com.mongodb.WriteConcern;
 import com.mongodb.client.model.CreateCollectionOptions;
 import com.mongodb.client.model.ValidationOptions;
 import com.mongodb.client.test.CollectionHelper;
 import com.mongodb.event.CommandEvent;
+import com.mongodb.event.CommandStartedEvent;
 import com.mongodb.internal.connection.TestCommandListener;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonArray;
@@ -35,6 +37,7 @@
 import org.bson.BsonUndefined;
 import org.bson.BsonValue;
 import org.bson.codecs.BsonDocumentCodec;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -50,6 +53,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.concurrent.TimeUnit;
 
 import static com.mongodb.ClusterFixture.getEnv;
 import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled;
@@ -93,6 +97,11 @@ protected BsonDocument getDefinition() {
         return definition;
     }
 
+
+    private boolean hasTimeoutError(@Nullable final BsonValue expectedResult) {
+        return hasErrorField(expectedResult, "isTimeoutError");
+    }
+
     private boolean hasErrorContainsField(@Nullable final BsonValue expectedResult) {
         return hasErrorField(expectedResult, "errorContains");
     }
@@ -127,7 +136,6 @@ public void setUp() {
         assumeTrue("Client side encryption tests disabled", hasEncryptionTestsEnabled());
         assumeFalse("runOn requirements not satisfied", skipTest);
         assumeFalse("Skipping count tests", filename.startsWith("count."));
-        assumeFalse("Skipping timeoutMS tests", filename.startsWith("timeoutMS."));
 
         assumeFalse(definition.getString("skipReason", new BsonString("")).getValue(), definition.containsKey("skipReason"));
 
@@ -262,6 +270,11 @@ public void setUp() {
         MongoClientSettings.Builder mongoClientSettingsBuilder = Fixture.getMongoClientSettingsBuilder()
                         .addCommandListener(commandListener);
 
+        if (clientOptions.containsKey("timeoutMS")) {
+            long timeoutMs = clientOptions.getInt32("timeoutMS").longValue();
+            mongoClientSettingsBuilder.timeout(timeoutMs, TimeUnit.MILLISECONDS);
+        }
+
         if (!kmsProvidersMap.isEmpty()) {
             mongoClientSettingsBuilder.autoEncryptionSettings(AutoEncryptionSettings.builder()
                     .keyVaultNamespace(keyVaultNamespace)
@@ -276,6 +289,19 @@ public void setUp() {
         createMongoClient(mongoClientSettingsBuilder.build());
         database = getDatabase(databaseName);
         helper = new JsonPoweredCrudTestHelper(description, database, database.getCollection(collectionName, BsonDocument.class));
+
+        if (definition.containsKey("failPoint")) {
+            collectionHelper.runAdminCommand(definition.getDocument("failPoint"));
+        }
+    }
+
+    @After
+    public void cleanUp() {
+        if (collectionHelper != null && definition.containsKey("failPoint")) {
+            collectionHelper.runAdminCommand(new BsonDocument("configureFailPoint",
+                    definition.getDocument("failPoint").getString("configureFailPoint"))
+                    .append("mode", new BsonString("off")));
+        }
     }
 
     protected abstract void createMongoClient(MongoClientSettings settings);
@@ -285,12 +311,15 @@ public void setUp() {
 
     @Test
     public void shouldPassAllOutcomes() {
+        // Note: timeoutMS.* spec files are now executed like every other file (skip removed from setUp above).
         for (BsonValue cur : definition.getArray("operations")) {
             BsonDocument operation = cur.asDocument();
             String operationName = operation.getString("name").getValue();
             BsonValue expectedResult = operation.get("result");
             try {
                 BsonDocument actualOutcome = helper.getOperationResults(operation);
+                assertFalse(String.format("Expected a timeout error but got: %s", actualOutcome.toJson()), hasTimeoutError(expectedResult));
+
                 if (expectedResult != null) {
                     BsonValue actualResult = actualOutcome.get("result", new BsonString("No result or error"));
                     assertBsonValue("Expected operation result differs from actual", expectedResult, actualResult);
@@ -302,6 +331,9 @@ public void shouldPassAllOutcomes() {
                         getErrorCodeNameField(expectedResult), operationName), hasErrorCodeNameField(expectedResult));
             } catch (Exception e) {
                 boolean passedAssertion = false;
+                if (hasTimeoutError(expectedResult) && e instanceof MongoOperationTimeoutException) {
+                    passedAssertion = true;
+                }
                 if (hasErrorContainsField(expectedResult)) {
                     String expectedError = getErrorContainsField(expectedResult);
                     assertTrue(String.format("Expected '%s' but got '%s' for operation %s", expectedError, e.getMessage(),
@@ -325,8 +357,8 @@ public void shouldPassAllOutcomes() {
         }
 
         if (definition.containsKey("expectations")) {
-            List<CommandEvent> expectedEvents = getExpectedEvents(definition.getArray("expectations"), "default", null);
-            List<CommandEvent> events = commandListener.getCommandStartedEvents();
+            List<CommandEvent> expectedEvents = getExpectedEvents(definition.getArray("expectations"), specDocument.getString("database_name").getValue(), null);
+            List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
             assertEventsEquality(expectedEvents, events);
         }
 
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java
new file mode 100644
index 00000000000..418f874aabe
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java
@@ -0,0 +1,954 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.ClusterFixture;
+import com.mongodb.ConnectionString;
+import com.mongodb.CursorType;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCredential;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketReadTimeoutException;
+import com.mongodb.MongoTimeoutException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.TransactionOptions;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.gridfs.GridFSBucket;
+import com.mongodb.client.gridfs.GridFSDownloadStream;
+import com.mongodb.client.gridfs.GridFSUploadStream;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.changestream.ChangeStreamDocument;
+import com.mongodb.client.model.changestream.FullDocument;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.event.CommandEvent;
+import com.mongodb.event.CommandFailedEvent;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.event.CommandSucceededEvent;
+import com.mongodb.event.ConnectionClosedEvent;
+import com.mongodb.event.ConnectionCreatedEvent;
+import com.mongodb.event.ConnectionReadyEvent;
+import com.mongodb.internal.connection.ServerHelper;
+import com.mongodb.internal.connection.TestCommandListener;
+import com.mongodb.internal.connection.TestConnectionPoolListener;
+import com.mongodb.test.FlakyTest;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonTimestamp;
+import org.bson.Document;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Named;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.mongodb.ClusterFixture.applyTimeoutMultiplierForServerless;
+import static com.mongodb.ClusterFixture.getConnectionString;
+import static com.mongodb.ClusterFixture.isAuthenticated;
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.ClusterFixture.isServerlessTest;
+import static com.mongodb.ClusterFixture.isStandalone;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.ClusterFixture.sleep;
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static com.mongodb.client.Fixture.getPrimary;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * See
+ * <a href="https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md">Prose Tests</a>.
+ */
+@SuppressWarnings("checkstyle:VisibilityModifier")
+public abstract class AbstractClientSideOperationsTimeoutProseTest {
+
+    protected static final String FAIL_COMMAND_NAME = "failCommand";
+    protected static final String GRID_FS_BUCKET_NAME = "db.fs";
+    private static final AtomicInteger COUNTER = new AtomicInteger();
+
+    protected MongoNamespace namespace;
+    protected MongoNamespace gridFsFileNamespace;
+    protected MongoNamespace gridFsChunksNamespace;
+
+    protected CollectionHelper<BsonDocument> collectionHelper;
+    private CollectionHelper<BsonDocument> filesCollectionHelper;
+    private CollectionHelper<BsonDocument> chunksCollectionHelper;
+
+    protected TestCommandListener commandListener;
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings mongoClientSettings);
+
+    protected abstract GridFSBucket createGridFsBucket(MongoDatabase mongoDatabase, String bucketName);
+
+    protected abstract boolean isAsync();
+
+    protected int postSessionCloseSleep() {
+        return 0;
+    }
+
+    @SuppressWarnings("try")
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("4. Background Connection Pooling - timeoutMS used for handshake commands")
+    public void testBackgroundConnectionPoolingTimeoutMSUsedForHandshakeCommands() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isAuthenticated());
+        assumeFalse(isServerlessTest());
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "    mode: {"
+                + "        times: 1"
+                + "    },"
+                + "    data: {"
+                + "        failCommands: [\"saslContinue\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: 150,"
+                + "        appName: \"timeoutBackgroundPoolTest\""
+                + "    }"
+                + "}");
+
+        TestConnectionPoolListener connectionPoolListener = new TestConnectionPoolListener();
+
+        try (MongoClient ignoredClient = createMongoClient(getMongoClientSettingsBuilder()
+                .applicationName("timeoutBackgroundPoolTest")
+                .applyToConnectionPoolSettings(builder -> {
+                    builder.minSize(1);
+                    builder.addConnectionPoolListener(connectionPoolListener);
+                })
+                .timeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) {
+
+            assertDoesNotThrow(() ->
+                    connectionPoolListener.waitForEvents(asList(ConnectionCreatedEvent.class, ConnectionClosedEvent.class),
+                            10, TimeUnit.SECONDS));
+        }
+    }
+
+    @SuppressWarnings("try")
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("4. Background Connection Pooling - timeoutMS is refreshed for each handshake command")
+    public void testBackgroundConnectionPoolingTimeoutMSIsRefreshedForEachHandshakeCommand() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isAuthenticated());
+        assumeFalse(isServerlessTest());
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "    mode: \"alwaysOn\","
+                + "    data: {"
+                + "        failCommands: [\"hello\", \"isMaster\", \"saslContinue\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: 150,"
+                + "        appName: \"refreshTimeoutBackgroundPoolTest\""
+                + "    }"
+                + "}");
+
+        TestConnectionPoolListener connectionPoolListener = new TestConnectionPoolListener();
+
+        try (MongoClient ignoredClient = createMongoClient(getMongoClientSettingsBuilder()
+                .applicationName("refreshTimeoutBackgroundPoolTest")
+                .applyToConnectionPoolSettings(builder -> {
+                    builder.minSize(1);
+                    builder.addConnectionPoolListener(connectionPoolListener);
+                })
+                .timeout(applyTimeoutMultiplierForServerless(250), TimeUnit.MILLISECONDS))) {
+
+            assertDoesNotThrow(() ->
+                    connectionPoolListener.waitForEvents(asList(ConnectionCreatedEvent.class, ConnectionReadyEvent.class),
+                            10, TimeUnit.SECONDS));
+        }
+    }
+
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("5. Blocking Iteration Methods - Tailable cursors")
+    public void testBlockingIterationMethodsTailableCursor() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isServerlessTest());
+
+        collectionHelper.create(namespace.getCollectionName(),
+                new CreateCollectionOptions().capped(true).sizeInBytes(10 * 1024 * 1024));
+        collectionHelper.insertDocuments(singletonList(BsonDocument.parse("{x: 1}")), WriteConcern.MAJORITY);
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: \"alwaysOn\","
+                + "  data: {"
+                + "    failCommands: [\"getMore\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(150)
+                + "  }"
+                + "}");
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(applyTimeoutMultiplierForServerless(250), TimeUnit.MILLISECONDS))) {
+            MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (MongoCursor<Document> cursor = collection.find().cursorType(CursorType.Tailable).cursor()) {
+                Document document = assertDoesNotThrow(cursor::next);
+                assertEquals(1, document.get("x"));
+                assertThrows(MongoOperationTimeoutException.class, cursor::next);
+            }
+
+            List<CommandSucceededEvent> events = commandListener.getCommandSucceededEvents();
+            assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count());
+            long getMoreCount = events.stream().filter(e -> e.getCommandName().equals("getMore")).count();
+            assertTrue(getMoreCount <= 2, "getMoreCount expected to less than or equal to two but was: " +  getMoreCount);
+        }
+    }
+
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("5. Blocking Iteration Methods - Change Streams")
+    public void testBlockingIterationMethodsChangeStream() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isDiscoverableReplicaSet());
+        assumeFalse(isServerlessTest());
+        assumeFalse(isAsync()); // Async change stream cursor is non-deterministic for cursor::next
+
+        // Record a start time before the insert so the change stream replays the insert event.
+        BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0);
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        sleep(applyTimeoutMultiplierForServerless(2000));
+        collectionHelper.insertDocuments(singletonList(BsonDocument.parse("{x: 1}")), WriteConcern.MAJORITY);
+
+        // Stall every getMore by 150ms against a 250ms timeoutMS: the first next() (replayed insert)
+        // succeeds, but a subsequent next() exhausts the remaining time budget and must time out.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: \"alwaysOn\","
+                + "  data: {"
+                + "    failCommands: [\"getMore\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(150)
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(applyTimeoutMultiplierForServerless(250), TimeUnit.MILLISECONDS))) {
+
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+            try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch(
+                    singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}")))
+                    .startAtOperationTime(startTime)
+                    .fullDocument(FullDocument.UPDATE_LOOKUP)
+                    .cursor()) {
+                ChangeStreamDocument<Document> document = assertDoesNotThrow(cursor::next);
+
+                Document fullDocument = document.getFullDocument();
+                assertNotNull(fullDocument);
+                assertEquals(1, fullDocument.get("x"));
+                assertThrows(MongoOperationTimeoutException.class, cursor::next);
+            }
+            // One aggregate opens the stream; at most two getMores fit inside the timeout budget.
+            List<CommandSucceededEvent> events = commandListener.getCommandSucceededEvents();
+            assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("aggregate")).count());
+            long getMoreCount = events.stream().filter(e -> e.getCommandName().equals("getMore")).count();
+            assertTrue(getMoreCount <= 2, "getMoreCount expected to be less than or equal to two but was: " + getMoreCount);
+        }
+    }
+
+    @DisplayName("6. GridFS Upload - uploads via openUploadStream can be timed out")
+    @Test
+    public void testGridFSUploadViaOpenUploadStreamTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        // Block the flushing "insert" for (rtt + 205)ms, just beyond the client timeoutMS of
+        // (rtt + 200)ms, so the write performed on close() cannot complete in time.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"insert\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(205))
+                + "  }"
+                + "}");
+
+        chunksCollectionHelper.create();
+        filesCollectionHelper.create();
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(namespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME);
+
+            // close() flushes the buffered byte via an insert, which the fail point stalls.
+            try (GridFSUploadStream uploadStream = gridFsBucket.openUploadStream("filename")){
+                uploadStream.write(0x12);
+                assertThrows(MongoOperationTimeoutException.class, uploadStream::close);
+            }
+        }
+    }
+
+    @DisplayName("6. GridFS Upload - Aborting an upload stream can be timed out")
+    @Test
+    public void testAbortingGridFsUploadStreamTimeout() throws Throwable {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        // abort() cleans up already-written chunks with a "delete"; block it for (rtt + 305)ms,
+        // just beyond the client timeoutMS of (rtt + 300)ms.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"delete\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(305))
+                + "  }"
+                + "}");
+
+        chunksCollectionHelper.create();
+        filesCollectionHelper.create();
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + applyTimeoutMultiplierForServerless(300), TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(namespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME).withChunkSizeBytes(2);
+
+            // 4 bytes with a 2-byte chunk size forces chunk writes before abort() issues its delete.
+            try (GridFSUploadStream uploadStream = gridFsBucket.openUploadStream("filename")){
+                uploadStream.write(new byte[]{0x01, 0x02, 0x03, 0x04});
+                assertThrows(MongoOperationTimeoutException.class, uploadStream::abort);
+            }
+        }
+    }
+
+    @DisplayName("6. GridFS Download")
+    @Test
+    public void testGridFsDownloadStreamTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        chunksCollectionHelper.create();
+        filesCollectionHelper.create();
+
+        // Seed a files-collection document describing a stored file with no matching chunks;
+        // read() must first find the file metadata, then find its chunks.
+        filesCollectionHelper.insertDocuments(singletonList(BsonDocument.parse(
+                "{"
+                        + "   _id: {"
+                        + "     $oid: \"000000000000000000000005\""
+                        + "   },"
+                        + "   length: 10,"
+                        + "   chunkSize: 4,"
+                        + "   uploadDate: {"
+                        + "     $date: \"1970-01-01T00:00:00.000Z\""
+                        + "   },"
+                        + "   md5: \"57d83cd477bfb1ccd975ab33d827a92b\","
+                        + "   filename: \"length-10\","
+                        + "   contentType: \"application/octet-stream\","
+                        + "   aliases: [],"
+                        + "   metadata: {}"
+                        + "}"
+        )), WriteConcern.MAJORITY);
+        // skip: 1 lets the first find (files collection) through and blocks the second
+        // find (chunks collection) for longer than the remaining timeout budget.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { skip: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"find\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(95))
+                + "  }"
+                + "}");
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(namespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME).withChunkSizeBytes(2);
+
+            try (GridFSDownloadStream downloadStream = gridFsBucket.openDownloadStream(new ObjectId("000000000000000000000005"))){
+                assertThrows(MongoOperationTimeoutException.class, downloadStream::read);
+
+                // Both finds must have been attempted, in files-then-chunks order.
+                List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
+                List<CommandStartedEvent> findCommands = events.stream().filter(e -> e.getCommandName().equals("find")).collect(Collectors.toList());
+
+                assertEquals(2, findCommands.size());
+                assertEquals(gridFsFileNamespace.getCollectionName(), findCommands.get(0).getCommand().getString("find").getValue());
+                assertEquals(gridFsChunksNamespace.getCollectionName(), findCommands.get(1).getCommand().getString("find").getValue());
+            }
+        }
+    }
+
+    @DisplayName("8. Server Selection 1 / 2")
+    @ParameterizedTest(name = "[{index}] {0}")
+    @MethodSource("test8ServerSelectionArguments")
+    public void test8ServerSelection(final String connectionString) {
+        assumeFalse(isServerlessTest());
+        int timeoutBuffer = 100; // 5 in spec, Java is slower
+        // 1. Create a MongoClient
+        // The host "invalid" is unresolvable, so server selection can never succeed and the
+        // effective selection timeout (min of timeoutMS and serverSelectionTimeoutMS) applies.
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .applyConnectionString(new ConnectionString(connectionString)))
+        ) {
+            long start = System.nanoTime();
+            // 2. Using client, execute:
+            Throwable throwable = assertThrows(MongoTimeoutException.class, () -> {
+                mongoClient.getDatabase("admin").runCommand(new BsonDocument("ping", new BsonInt32(1)));
+            });
+            // Expect this to fail with a server selection timeout error after no more than 15ms [this is increased]
+            long elapsed = msElapsedSince(start);
+            assertTrue(throwable.getMessage().contains("while waiting for a server"));
+            assertTrue(elapsed < 10 + timeoutBuffer, "Took too long to time out, elapsedMS: " + elapsed);
+        }
+    }
+
+    @DisplayName("8. Server Selection 2 / 2")
+    @ParameterizedTest(name = "[{index}] {0}")
+    @MethodSource("test8ServerSelectionHandshakeArguments")
+    public void test8ServerSelectionHandshake(final String ignoredTestName, final int timeoutMS, final int serverSelectionTimeoutMS) {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isAuthenticated());
+        assumeFalse(isServerlessTest());
+
+        MongoCredential credential = getConnectionString().getCredential();
+        assertNotNull(credential);
+        assertNull(credential.getAuthenticationMechanism());
+
+        MongoNamespace namespace = generateNamespace();
+        collectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), namespace);
+        // Stall the auth handshake (saslContinue) for 350ms, longer than either timeout argument
+        // (200/300ms), so connection establishment itself must hit the effective deadline.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: \"alwaysOn\","
+                + "  data: {"
+                + "    failCommands: [\"saslContinue\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: 350"
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(timeoutMS, TimeUnit.MILLISECONDS)
+                .applyToClusterSettings(b -> b.serverSelectionTimeout(serverSelectionTimeoutMS, TimeUnit.MILLISECONDS))
+                .retryWrites(false))) {
+
+            long start = System.nanoTime();
+            assertThrows(MongoOperationTimeoutException.class, () -> {
+                mongoClient.getDatabase(namespace.getDatabaseName())
+                        .getCollection(namespace.getCollectionName())
+                        .insertOne(new Document("x", 1));
+            });
+            long elapsed = msElapsedSince(start);
+            // min(timeoutMS, serverSelectionTimeoutMS) is 200ms for both argument sets; 310 allows slack.
+            assertTrue(elapsed <= 310, "Took too long to time out, elapsedMS: " + elapsed);
+        }
+    }
+
+    @SuppressWarnings("try")
+    @DisplayName("9. End Session. The timeout specified via the MongoClient timeoutMS option")
+    @Test
+    public void test9EndSessionClientTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isServerlessTest());
+
+        // Closing a session with an open transaction triggers abortTransaction; block it for
+        // 150ms against the client-level timeoutMS of 100ms.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"abortTransaction\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(150)
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder().retryWrites(false)
+                .timeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession()) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+
+                long start = System.nanoTime();
+                session.close();
+                // close() swallows the timeout; verify it returned within the budget instead.
+                long elapsed = msElapsedSince(start) - postSessionCloseSleep();
+                assertTrue(elapsed <= applyTimeoutMultiplierForServerless(150), "Took too long to time out, elapsedMS: " + elapsed);
+            }
+        }
+        // The abort must have failed with the CSOT timeout exception.
+        CommandFailedEvent abortTransactionEvent = assertDoesNotThrow(() ->
+                commandListener.getCommandFailedEvent("abortTransaction"));
+        assertInstanceOf(MongoOperationTimeoutException.class, abortTransactionEvent.getThrowable());
+    }
+
+    @SuppressWarnings("try")
+    @DisplayName("9. End Session. The timeout specified via the ClientSession defaultTimeoutMS option")
+    @Test
+    public void test9EndSessionSessionTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isServerlessTest());
+
+        // Closing a session with an open transaction triggers abortTransaction; block it for
+        // 150ms against the session-level defaultTimeout of 100ms (client has no timeoutMS).
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"abortTransaction\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(150)
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS).build())) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+
+                long start = System.nanoTime();
+                session.close();
+                // close() swallows the timeout; verify it returned within the budget instead.
+                long elapsed = msElapsedSince(start) - postSessionCloseSleep();
+                assertTrue(elapsed <= applyTimeoutMultiplierForServerless(150), "Took too long to time out, elapsedMS: " + elapsed);
+            }
+        }
+        // The abort must have failed with the CSOT timeout exception.
+        CommandFailedEvent abortTransactionEvent = assertDoesNotThrow(() ->
+                commandListener.getCommandFailedEvent("abortTransaction"));
+        assertInstanceOf(MongoOperationTimeoutException.class, abortTransactionEvent.getThrowable());
+    }
+
+    @DisplayName("9. End Session - Custom Test: Each operation has its own timeout with commit")
+    @Test
+    // NOTE(review): method name has a typo ("CustomTes" -> "CustomTest"); left unchanged to keep
+    // the externally visible identifier stable.
+    public void test9EndSessionCustomTesEachOperationHasItsOwnTimeoutWithCommit() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        // Brief 25ms stall on the insert: enough to consume part of the session default timeout
+        // without exceeding it, proving commit gets its own fresh budget.
+        // NOTE(review): unlike sibling tests this value is not passed through
+        // applyTimeoutMultiplierForServerless - confirm whether that is intentional.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"insert\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: 25"
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+                // Sleep past the default timeout; the commit must still succeed because each
+                // operation gets its own timeout rather than sharing one session-wide deadline.
+                sleep(applyTimeoutMultiplierForServerless(200));
+
+                assertDoesNotThrow(session::commitTransaction);
+            }
+        }
+        assertDoesNotThrow(() -> commandListener.getCommandSucceededEvent("commitTransaction"));
+    }
+
+    @DisplayName("9. End Session - Custom Test: Each operation has its own timeout with abort")
+    @Test
+    // NOTE(review): method name has a typo ("CustomTes" -> "CustomTest"); left unchanged to keep
+    // the externally visible identifier stable.
+    public void test9EndSessionCustomTesEachOperationHasItsOwnTimeoutWithAbort() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        // Brief 25ms stall on the insert; see the "with commit" variant above for rationale.
+        // NOTE(review): unlike sibling tests this value is not passed through
+        // applyTimeoutMultiplierForServerless - confirm whether that is intentional.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"insert\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: 25"
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+                // Sleep past the default timeout; the implicit abort on close() must still succeed
+                // because each operation gets its own timeout.
+                sleep(applyTimeoutMultiplierForServerless(200));
+
+                assertDoesNotThrow(session::close);
+            }
+        }
+        assertDoesNotThrow(() -> commandListener.getCommandSucceededEvent("abortTransaction"));
+    }
+
+    @DisplayName("10. Convenient Transactions")
+    @Test
+    public void test10ConvenientTransactions() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isAsync());
+        // Block both the insert and the subsequent abortTransaction for 150ms each, against a
+        // 100ms client timeoutMS, so withTransaction fails and its cleanup abort also times out.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 2 },"
+                + "  data: {"
+                + "    failCommands: [\"insert\", \"abortTransaction\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(150)
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession()) {
+                assertThrows(MongoOperationTimeoutException.class,
+                        () -> session.withTransaction(() -> collection.insertOne(session, new Document("x", 1))));
+            }
+
+            // Exactly one failed insert and one failed abortTransaction should be observed.
+            List<CommandEvent> failedEvents = commandListener.getEvents().stream()
+                    .filter(e -> e instanceof CommandFailedEvent)
+                    .collect(Collectors.toList());
+
+            assertEquals(1, failedEvents.stream().filter(e -> e.getCommandName().equals("insert")).count());
+            assertEquals(1, failedEvents.stream().filter(e -> e.getCommandName().equals("abortTransaction")).count());
+        }
+    }
+
+    @DisplayName("10. Convenient Transactions - Custom Test: with transaction uses a single timeout")
+    @Test
+    public void test10CustomTestWithTransactionUsesASingleTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isAsync());
+        // Small 25ms stall on the insert; the real budget consumption is the sleep inside the
+        // transaction body below.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: { times: 1 },"
+                + "  data: {"
+                + "    failCommands: [\"insert\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(25)
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) {
+                // Sleeping past defaultTimeout inside the body must fail the whole withTransaction,
+                // proving the convenient API shares a single timeout across the transaction.
+                assertThrows(MongoOperationTimeoutException.class,
+                        () -> session.withTransaction(() -> {
+                            collection.insertOne(session, new Document("x", 1));
+                            sleep(applyTimeoutMultiplierForServerless(200));
+                            return true;
+                        })
+                );
+            }
+        }
+    }
+
+    @DisplayName("10. Convenient Transactions - Custom Test: with transaction uses a single timeout - lock")
+    @Test
+    public void test10CustomTestWithTransactionUsesASingleTimeoutWithLock() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isAsync());
+        // Every insert fails with a retryable TransientTransactionError (WriteConflict, code 24)
+        // after a 25ms stall, forcing withTransaction into its retry loop until the single
+        // session timeout is exhausted.
+        // Fix: the original concatenation omitted the commas after blockTimeMS and errorCode,
+        // producing unparseable JSON for configureFailPoint.
+        collectionHelper.runAdminCommand("{"
+                + "  configureFailPoint: \"failCommand\","
+                + "  mode: \"alwaysOn\","
+                + "  data: {"
+                + "    failCommands: [\"insert\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(25) + ","
+                + "    errorCode: 24,"
+                + "    errorLabels: [\"TransientTransactionError\"]"
+                + "  }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS).build())) {
+                assertThrows(MongoOperationTimeoutException.class,
+                        () -> session.withTransaction(() -> {
+                            collection.insertOne(session, new Document("x", 1));
+                            sleep(applyTimeoutMultiplierForServerless(200));
+                            return true;
+                        })
+                );
+            }
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     *
+     * <p>Verifies that when CSOT is in effect, the {@code wtimeout} from the transaction's
+     * WriteConcern is stripped from both the initial and the retried commitTransaction commands.
+     */
+    @Test
+    @DisplayName("Should ignore wTimeoutMS of WriteConcern to initial and subsequent commitTransaction operations")
+    public void shouldIgnoreWtimeoutMsOfWriteConcernToInitialAndSubsequentCommitTransactionOperations() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS)
+                    .build())) {
+                session.startTransaction(TransactionOptions.builder()
+                        .writeConcern(WriteConcern.ACKNOWLEDGED.withWTimeout(applyTimeoutMultiplierForServerless(100), TimeUnit.MILLISECONDS))
+                        .build());
+                collection.insertOne(session, new Document("x", 1));
+                sleep(applyTimeoutMultiplierForServerless(200));
+
+                assertDoesNotThrow(session::commitTransaction);
+                //repeat commit.
+                assertDoesNotThrow(session::commitTransaction);
+            }
+        }
+        // Both commit attempts must omit "wtimeout" from any writeConcern they carry.
+        List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents("commitTransaction");
+        assertEquals(2, commandStartedEvents.size());
+
+        commandStartedEvents.forEach(e -> {
+            BsonDocument command = e.getCommand();
+            if (command.containsKey("writeConcern")) {
+                BsonDocument writeConcern = command.getDocument("writeConcern");
+                assertFalse(writeConcern.isEmpty());
+                assertFalse(writeConcern.containsKey("wtimeout"));
+            }});
+    }
+
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     *
+     * <p>With no timeoutMS set, a socket read timeout on getMore must not be followed by a
+     * killCursors command.
+     */
+    @DisplayName("KillCursors is not executed after getMore network error when timeout is not enabled")
+    @Test
+    public void testKillCursorsIsNotExecutedAfterGetMoreNetworkErrorWhenTimeoutIsNotEnabled() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isServerlessTest());
+
+        long rtt = ClusterFixture.getPrimaryRTT();
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        collectionHelper.insertDocuments(new Document(), new Document());
+        // Block getMore past the 500ms socket read timeout configured below.
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1},"
+                + "    data: {"
+                + "        failCommands: [\"getMore\" ],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(600))
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .retryReads(true)
+                .applyToSocketSettings(builder -> builder.readTimeout(applyTimeoutMultiplierForServerless(500), TimeUnit.MILLISECONDS)))) {
+
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+
+            // batchSize(1) guarantees the second document requires a getMore.
+            MongoCursor<Document> cursor = collection.find()
+                    .batchSize(1)
+                    .cursor();
+
+            cursor.next();
+            assertThrows(MongoSocketReadTimeoutException.class, cursor::next);
+            cursor.close();
+        }
+
+        // Only the find and the failed getMore may appear; no killCursors after the network error.
+        List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
+        assertEquals(2, events.size(), "Actual events: " + events.stream()
+                .map(CommandStartedEvent::getCommandName)
+                .collect(Collectors.toList()));
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count());
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("getMore")).count());
+
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     *
+     * <p>With timeoutMS enabled, a getMore that exceeds the timeout must not be followed by a
+     * killCursors command.
+     */
+    @DisplayName("KillCursors is not executed after getMore network error")
+    @Test
+    public void testKillCursorsIsNotExecutedAfterGetMoreNetworkError() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isServerlessTest());
+
+        long rtt = ClusterFixture.getPrimaryRTT();
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        collectionHelper.insertDocuments(new Document(), new Document());
+        // Block getMore past the 500ms timeoutMS configured below.
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1},"
+                + "    data: {"
+                + "        failCommands: [\"getMore\" ],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + (rtt + applyTimeoutMultiplierForServerless(600))
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(applyTimeoutMultiplierForServerless(500), TimeUnit.MILLISECONDS))) {
+
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+
+            // batchSize(1) guarantees the second document requires a getMore.
+            MongoCursor<Document> cursor = collection.find()
+                    .batchSize(1)
+                    .cursor();
+
+            cursor.next();
+            assertThrows(MongoOperationTimeoutException.class, cursor::next);
+            cursor.close();
+        }
+
+        // Only the find and the failed getMore may appear; no killCursors after the timeout.
+        List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
+        assertEquals(2, events.size(), "Actual events: " + events.stream()
+                .map(CommandStartedEvent::getCommandName)
+                .collect(Collectors.toList()));
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count());
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("getMore")).count());
+
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     *
+     * <p>Each commitTransaction retry gets its own timeout: the first commit succeeds, then a
+     * fail point stalls the repeated commit past the session default timeout.
+     */
+    @Test
+    @DisplayName("Should throw timeout exception for subsequent commit transaction")
+    public void shouldThrowTimeoutExceptionForSubsequentCommitTransaction() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(applyTimeoutMultiplierForServerless(200), TimeUnit.MILLISECONDS)
+                    .build())) {
+                session.startTransaction(TransactionOptions.builder().build());
+                collection.insertOne(session, new Document("x", 1));
+                sleep(applyTimeoutMultiplierForServerless(200));
+
+                assertDoesNotThrow(session::commitTransaction);
+
+                // Stall only the second commit for 500ms, beyond the 200ms per-operation budget.
+                collectionHelper.runAdminCommand("{"
+                        + "  configureFailPoint: \"failCommand\","
+                        + "  mode: { times: 1 },"
+                        + "  data: {"
+                        + "    failCommands: [\"commitTransaction\"],"
+                        + "    blockConnection: true,"
+                        + "    blockTimeMS: " + applyTimeoutMultiplierForServerless(500)
+                        + "  }"
+                        + "}");
+
+                //repeat commit.
+                assertThrows(MongoOperationTimeoutException.class, session::commitTransaction);
+            }
+        }
+        // Two commits were attempted; only the stalled one failed.
+        List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents("commitTransaction");
+        assertEquals(2, commandStartedEvents.size());
+
+        List<CommandFailedEvent> failedEvents = commandListener.getCommandFailedEvents("commitTransaction");
+        assertEquals(1, failedEvents.size());
+    }
+
+    // Connection strings for test8ServerSelection: each targets an unresolvable host so server
+    // selection always fails, with varying timeoutMS/serverSelectionTimeoutMS combinations.
+    private static Stream<Arguments> test8ServerSelectionArguments() {
+        return Stream.of(
+                Arguments.of(Named.of("serverSelectionTimeoutMS honored if timeoutMS is not set",
+                        "mongodb://invalid/?serverSelectionTimeoutMS=10")),
+                Arguments.of(Named.of("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS",
+                        "mongodb://invalid/?timeoutMS=200&serverSelectionTimeoutMS=10")),
+                Arguments.of(Named.of("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS",
+                        "mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=200")),
+                Arguments.of(Named.of("serverSelectionTimeoutMS honored for server selection if timeoutMS=0",
+                        "mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10"))
+
+        );
+    }
+
+    // (name, timeoutMS, serverSelectionTimeoutMS) tuples for test8ServerSelectionHandshake; the
+    // lower of the two values (200ms in both cases) should bound the handshake failure.
+    private static Stream<Arguments> test8ServerSelectionHandshakeArguments() {
+        return Stream.of(
+                Arguments.of("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", 200, 300),
+                Arguments.of("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", 300, 200)
+        );
+    }
+
+    /**
+     * Creates a namespace unique to this test run by appending a monotonically increasing counter
+     * to the test class's simple name, avoiding cross-test collection collisions.
+     */
+    protected MongoNamespace generateNamespace() {
+        return new MongoNamespace(getDefaultDatabaseName(),
+                getClass().getSimpleName() + "_" + COUNTER.incrementAndGet());
+    }
+
+    /**
+     * Returns a settings builder preconfigured with majority read/write concerns, primary read
+     * preference, and the test command listener. Side effect: resets the command listener so
+     * each client starts with a clean event log.
+     */
+    protected MongoClientSettings.Builder getMongoClientSettingsBuilder() {
+        commandListener.reset();
+        return Fixture.getMongoClientSettingsBuilder()
+                .readConcern(ReadConcern.MAJORITY)
+                .writeConcern(WriteConcern.MAJORITY)
+                .readPreference(ReadPreference.primary())
+                .addCommandListener(commandListener);
+    }
+
+    @BeforeEach
+    public void setUp() {
+        // Fresh namespaces and helpers per test; GridFS uses the conventional ".files"/".chunks"
+        // collection-name suffixes.
+        namespace = generateNamespace();
+        gridFsFileNamespace = new MongoNamespace(getDefaultDatabaseName(), GRID_FS_BUCKET_NAME + ".files");
+        gridFsChunksNamespace = new MongoNamespace(getDefaultDatabaseName(), GRID_FS_BUCKET_NAME + ".chunks");
+
+        collectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), namespace);
+        filesCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), gridFsFileNamespace);
+        chunksCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), gridFsChunksNamespace);
+        commandListener = new TestCommandListener();
+    }
+
+    @AfterEach
+    public void tearDown() {
+        // Always clear the fail point first so later cleanup commands are not blocked by it.
+        ClusterFixture.disableFailPoint(FAIL_COMMAND_NAME);
+        if (collectionHelper != null) {
+            collectionHelper.drop();
+            filesCollectionHelper.drop();
+            chunksCollectionHelper.drop();
+            commandListener.reset();
+            try {
+                ServerHelper.checkPool(getPrimary());
+            } catch (InterruptedException e) {
+                // Best-effort pool check; restore the interrupt status instead of swallowing it.
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+
+    @AfterAll
+    public static void finalTearDown() {
+        // Drop the whole default database once all tests in this class have run.
+        CollectionHelper.dropDatabase(getDefaultDatabaseName());
+    }
+
+    // Convenience overload: builds the settings and delegates to the abstract factory method.
+    private MongoClient createMongoClient(final MongoClientSettings.Builder builder) {
+        return createMongoClient(builder.build());
+    }
+
+    /**
+     * Milliseconds elapsed since the {@code System.nanoTime()} reading {@code t1}.
+     */
+    private long msElapsedSince(final long t1) {
+        return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t1);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java
index 894d291a743..0aa2ff28536 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java
@@ -18,7 +18,7 @@
 import com.mongodb.ConnectionString;
 import com.mongodb.MongoClientSettings;
 import com.mongodb.ServerAddress;
-import com.mongodb.event.CommandEvent;
+import com.mongodb.event.CommandStartedEvent;
 import com.mongodb.internal.connection.TestCommandListener;
 import org.bson.BsonArray;
 import org.bson.BsonBoolean;
@@ -133,7 +133,7 @@ private static Map<ServerAddress, Double> doSelections(final MongoCollection<Doc
         for (Future<Boolean> result : results) {
             result.get();
         }
-        List<CommandEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+        List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
         assertEquals(tasks * opsPerTask, commandStartedEvents.size());
         return commandStartedEvents.stream()
                 .collect(groupingBy(event -> event.getConnectionDescription().getServerAddress()))
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java
index e6c9b66d1b1..e927192ac8d 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionTest.java
@@ -41,6 +41,7 @@ protected MongoDatabase getDatabase(final String databaseName) {
 
     @After
     public void cleanUp() {
+        super.cleanUp();
         if (mongoClient != null) {
             mongoClient.close();
         }
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java
new file mode 100644
index 00000000000..fc80e2f1139
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.gridfs.GridFSBucket;
+import com.mongodb.client.gridfs.GridFSBuckets;
+
+
+/**
+ * See https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.rst#prose-tests
+ */
+public final class ClientSideOperationTimeoutProseTest extends AbstractClientSideOperationsTimeoutProseTest {
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) {
+        return MongoClients.create(mongoClientSettings);
+    }
+
+    @Override
+    protected GridFSBucket createGridFsBucket(final MongoDatabase mongoDatabase, final String bucketName) {
+        return GridFSBuckets.create(mongoDatabase, bucketName);
+    }
+
+    @Override
+    protected boolean isAsync() {
+        return false;
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java
new file mode 100644
index 00000000000..c4068375f9f
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.client.unified.UnifiedSyncTest;
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Collection;
+
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+
+
+// See https://github.com/mongodb/specifications/tree/master/source/client-side-operation-timeout/tests
+public class ClientSideOperationTimeoutTest extends UnifiedSyncTest {
+
+    private static Collection<Arguments> data() throws URISyntaxException, IOException {
+        return getTestData("unified-test-format/client-side-operation-timeout");
+    }
+
+    @Override
+    protected void skips(final String fileDescription, final String testDescription) {
+        skipOperationTimeoutTests(fileDescription, testDescription);
+    }
+
+    public static void skipOperationTimeoutTests(final String fileDescription, final String testDescription) {
+
+        if (ClusterFixture.isServerlessTest()) {
+
+            // It is not possible to create capped collections on serverless instances.
+            assumeFalse(fileDescription.equals("timeoutMS behaves correctly for tailable awaitData cursors"));
+            assumeFalse(fileDescription.equals("timeoutMS behaves correctly for tailable non-awaitData cursors"));
+
+            /* Drivers MUST NOT execute a killCursors command because the pinned connection is no longer under a load balancer. */
+            assumeFalse(testDescription.equals("timeoutMS is refreshed for close"));
+
+            /* Flaky tests. We have to retry them once we have a Junit5 rule. */
+            assumeFalse(testDescription.equals("remaining timeoutMS applied to getMore if timeoutMode is unset"));
+            assumeFalse(testDescription.equals("remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime"));
+            assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if timeoutMode is iteration - success"));
+            assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if timeoutMode is iteration - failure"));
+        }
+        assumeFalse(testDescription.contains("maxTimeMS is ignored if timeoutMS is set - createIndex on collection"),
+                "No maxTimeMS parameter for createIndex() method");
+        assumeFalse(fileDescription.startsWith("runCursorCommand"), "No run cursor command");
+        assumeFalse(testDescription.contains("runCommand on database"), "No special handling of runCommand");
+        assumeFalse(testDescription.endsWith("count on collection"), "No count command helper");
+        assumeFalse(fileDescription.equals("timeoutMS can be overridden for an operation"), "No operation based overrides");
+        assumeFalse(testDescription.equals("timeoutMS can be overridden for commitTransaction")
+                        || testDescription.equals("timeoutMS applied to abortTransaction"),
+                "No operation session based overrides");
+        assumeFalse(fileDescription.equals("timeoutMS behaves correctly when closing cursors")
+                && testDescription.equals("timeoutMS can be overridden for close"), "No operation based overrides");
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java
index 4ab1d179611..cea89765756 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java
@@ -17,7 +17,6 @@
 package com.mongodb.client;
 
 import com.mongodb.ReadConcern;
-import com.mongodb.event.CommandEvent;
 import com.mongodb.event.CommandStartedEvent;
 import com.mongodb.internal.connection.TestCommandListener;
 import org.bson.BsonDocument;
@@ -60,7 +59,7 @@ public void shouldIncludeReadConcernInCommand() {
         mongoClient.getDatabase(getDefaultDatabaseName()).getCollection("test")
                 .withReadConcern(ReadConcern.LOCAL).find().into(new ArrayList<>());
 
-        List<CommandEvent> events = commandListener.getCommandStartedEvents();
+        List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
 
         BsonDocument commandDocument = new BsonDocument("find", new BsonString("test"))
                 .append("readConcern", ReadConcern.LOCAL.asDocument())
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java b/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java
index 4b7dc8d9310..cf8c3bfc292 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java
@@ -26,9 +26,10 @@
 import com.mongodb.event.ServerHeartbeatSucceededEvent;
 import com.mongodb.event.ServerListener;
 import com.mongodb.event.ServerMonitorListener;
-import com.mongodb.internal.time.Timeout;
 import com.mongodb.internal.diagnostics.logging.Logger;
 import com.mongodb.internal.diagnostics.logging.Loggers;
+import com.mongodb.internal.time.TimePointTest;
+import com.mongodb.internal.time.Timeout;
 import com.mongodb.lang.Nullable;
 import org.bson.BsonArray;
 import org.bson.BsonDocument;
@@ -56,6 +57,7 @@
 import static com.mongodb.client.Fixture.getDefaultDatabaseName;
 import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
 import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED;
 import static java.lang.String.format;
 import static java.util.Arrays.asList;
 import static java.util.Collections.singleton;
@@ -267,21 +269,14 @@ public void monitorsSleepAtLeastMinHeartbeatFreqencyMSBetweenChecks() {
 
     private static void assertPoll(final BlockingQueue<?> queue, @Nullable final Class<?> allowed, final Set<Class<?>> required)
             throws InterruptedException {
-        assertPoll(queue, allowed, required, Timeout.startNow(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS));
+        assertPoll(queue, allowed, required, Timeout.expiresIn(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS, ZERO_DURATION_MEANS_EXPIRED));
     }
 
     private static void assertPoll(final BlockingQueue<?> queue, @Nullable final Class<?> allowed, final Set<Class<?>> required,
                                    final Timeout timeout) throws InterruptedException {
         Set<Class<?>> encountered = new HashSet<>();
         while (true) {
-            Object element;
-            if (timeout.isImmediate()) {
-                element = queue.poll();
-            } else if (timeout.isInfinite()) {
-                element = queue.take();
-            } else {
-                element = queue.poll(timeout.remaining(NANOSECONDS), NANOSECONDS);
-            }
+            Object element = poll(queue, timeout);
             if (element != null) {
                 if (LOGGER.isInfoEnabled()) {
                     LOGGER.info("Polled " + element);
@@ -299,12 +294,29 @@ private static void assertPoll(final BlockingQueue<?> queue, @Nullable final Cla
                     return;
                 }
             }
-            if (timeout.expired()) {
+            if (TimePointTest.hasExpired(timeout)) {
                 fail(format("encountered %s, required %s", encountered, required));
             }
         }
     }
 
+    @Nullable
+    private static Object poll(final BlockingQueue<?> queue, final Timeout timeout) throws InterruptedException {
+        long remainingNs = timeout.call(NANOSECONDS,
+                () -> -1L,
+                (ns) -> ns,
+                () -> 0L);
+        Object element;
+        if (remainingNs == -1) {
+            element = queue.take();
+        } else if (remainingNs == 0) {
+            element = queue.poll();
+        } else {
+            element = queue.poll(remainingNs, NANOSECONDS);
+        }
+        return element;
+    }
+
     private static Optional<Class<?>> findAssignable(final Class<?> from, final Set<Class<?>> toAnyOf) {
         return toAnyOf.stream().filter(to -> to.isAssignableFrom(from)).findAny();
     }
diff --git a/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java
index f9093dc4ae5..b09edf4ac43 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java
@@ -16,17 +16,25 @@
 
 package com.mongodb.client;
 
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.MongoClientException;
 import com.mongodb.MongoException;
+import com.mongodb.TransactionOptions;
 import com.mongodb.client.internal.ClientSessionClock;
+import com.mongodb.client.model.Sorts;
 import org.bson.Document;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.TIMEOUT;
 import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
 import static com.mongodb.ClusterFixture.isServerlessTest;
 import static com.mongodb.ClusterFixture.isSharded;
 import static com.mongodb.ClusterFixture.serverVersionAtLeast;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 import static org.junit.jupiter.api.Assumptions.assumeFalse;
@@ -162,6 +170,43 @@ public void testRetryTimeoutEnforcedTransientTransactionErrorOnCommit() {
         }
     }
 
+    //
+    // Ensure cannot override timeout in transaction
+    //
+    @Test
+    public void testTimeoutMS() {
+        try (ClientSession session = client.startSession(ClientSessionOptions.builder()
+                .defaultTransactionOptions(TransactionOptions.builder().timeout(TIMEOUT, TimeUnit.SECONDS).build())
+                .build())) {
+            assertThrows(MongoClientException.class, () -> session.withTransaction(() -> {
+                collection.insertOne(session, Document.parse("{ _id : 1 }"));
+                collection.withTimeout(2, TimeUnit.MINUTES).find(session).first();
+                return -1;
+            }));
+        }
+    }
+
+    //
+    // Ensure legacy settings don't cause issues in sessions
+    //
+    @Test
+    public void testTimeoutMSAndLegacySettings() {
+        try (ClientSession session = client.startSession(ClientSessionOptions.builder()
+                .defaultTransactionOptions(TransactionOptions.builder().timeout(TIMEOUT, TimeUnit.SECONDS).build())
+                .build())) {
+            Document document = Document.parse("{ _id : 1 }");
+            Document returnValueFromCallback = session.withTransaction(() -> {
+                collection.insertOne(session, document);
+                Document found = collection.find(session)
+                        .maxAwaitTime(1, TimeUnit.MINUTES)
+                        .sort(Sorts.descending("_id"))
+                        .first();
+                return found != null ? found : new Document();
+            });
+            assertEquals(document, returnValueFromCallback);
+        }
+    }
+
     private boolean canRunTests() {
         if (isSharded()) {
             return serverVersionAtLeast(4, 2);
diff --git a/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java
new file mode 100644
index 00000000000..31f72ca4332
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.csot;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoUpdatedEncryptedFieldsException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.Fixture;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.CreateEncryptedCollectionParams;
+import com.mongodb.client.model.ValidationOptions;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.internal.connection.TestCommandListener;
+import org.bson.BsonBinary;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.codecs.BsonDocumentCodec;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.applyTimeoutMultiplierForServerless;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.lessThan;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * See
+ * <a href="https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.rst#clientencryption">Prose Tests</a>.
+ */
+public abstract class AbstractClientSideOperationsEncryptionTimeoutProseTest {
+
+    protected static final String FAIL_COMMAND_NAME = "failCommand";
+    private static final Map<String, Map<String, Object>> KMS_PROVIDERS = new HashMap<>();
+
+    private final MongoNamespace keyVaultNamespace = new MongoNamespace("keyvault", "datakeys");
+
+    private CollectionHelper<BsonDocument> keyVaultCollectionHelper;
+
+    private TestCommandListener commandListener;
+
+    private static final String MASTER_KEY = "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5a"
+            + "XRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk";
+
+    protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings.Builder builder);
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings.Builder builder);
+
+    @Test
+    void shouldThrowOperationTimeoutExceptionWhenCreateDataKey() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+        Map<String, Object> localProviderMap = new HashMap<>();
+        localProviderMap.put("key", Base64.getDecoder().decode(MASTER_KEY));
+        kmsProviders.put("local", localProviderMap);
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 100))) {
+
+            keyVaultCollectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                    + "  mode: { times: 1 },"
+                    + "  data: {"
+                    + "    failCommands: [\"insert\"],"
+                    + "    blockConnection: true,"
+                    + "    blockTimeMS: " + (rtt + 100)
+                    + "  }"
+                    + "}");
+
+            assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.createDataKey("local"));
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertEquals(1, commandStartedEvents.size());
+            assertEquals(keyVaultNamespace.getCollectionName(),
+                    commandStartedEvents.get(0).getCommand().get("insert").asString().getValue());
+            assertNotNull(commandListener.getCommandFailedEvent("insert"));
+        }
+
+    }
+
+    @Test
+    void shouldThrowOperationTimeoutExceptionWhenEncryptData() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 150))) {
+
+            clientEncryption.createDataKey("local");
+
+            keyVaultCollectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                    + "  mode: { times: 1 },"
+                    + "  data: {"
+                    + "    failCommands: [\"find\"],"
+                    + "    blockConnection: true,"
+                    + "    blockTimeMS: " + (rtt + 150)
+                    + "  }"
+                    + "}");
+
+            BsonBinary dataKey = clientEncryption.createDataKey("local");
+
+            EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic");
+            encryptOptions.keyId(dataKey);
+            commandListener.reset();
+            assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.encrypt(new BsonString("hello"), encryptOptions));
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertEquals(1, commandStartedEvents.size());
+            assertEquals(keyVaultNamespace.getCollectionName(), commandStartedEvents.get(0).getCommand().get("find").asString().getValue());
+            assertNotNull(commandListener.getCommandFailedEvent("find"));
+        }
+
+    }
+
+    @Test
+    void shouldThrowOperationTimeoutExceptionWhenDecryptData() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        BsonBinary encrypted;
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 400))) {
+            clientEncryption.createDataKey("local");
+            BsonBinary dataKey = clientEncryption.createDataKey("local");
+            EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic");
+            encryptOptions.keyId(dataKey);
+            encrypted = clientEncryption.encrypt(new BsonString("hello"), encryptOptions);
+        }
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 400))) {
+            keyVaultCollectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                    + "  mode: { times: 1 },"
+                    + "  data: {"
+                    + "    failCommands: [\"find\"],"
+                    + "    blockConnection: true,"
+                    + "    blockTimeMS: " + (rtt + 500)
+                    + "  }"
+                    + "}");
+            commandListener.reset();
+            assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.decrypt(encrypted));
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertEquals(1, commandStartedEvents.size());
+            assertEquals(keyVaultNamespace.getCollectionName(), commandStartedEvents.get(0).getCommand().get("find").asString().getValue());
+            assertNotNull(commandListener.getCommandFailedEvent("find"));
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    void shouldDecreaseOperationTimeoutForSubsequentOperations() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+        long initialTimeoutMS = rtt + 2500;
+
+        keyVaultCollectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "  mode: \"alwaysOn\","
+                + "  data: {"
+                + "    failCommands: [\"insert\", \"find\", \"listCollections\"],"
+                + "    blockConnection: true,"
+                + "    blockTimeMS: " + (rtt + 10)
+                + "  }"
+                + "}");
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder()
+                .timeout(initialTimeoutMS, MILLISECONDS))) {
+            BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+            String base64DataKeyId = Base64.getEncoder().encodeToString(dataKeyId.getData());
+
+            final String dbName = "test";
+            final String collName = "coll";
+
+            AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                    .keyVaultNamespace(keyVaultNamespace.getFullName())
+                    .keyVaultMongoClientSettings(getMongoClientSettingsBuilder()
+                            .build())
+                    .kmsProviders(KMS_PROVIDERS)
+                    .build();
+
+            try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                    .autoEncryptionSettings(autoEncryptionSettings)
+                    .timeout(initialTimeoutMS, MILLISECONDS))) {
+
+                CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions();
+                createCollectionOptions.validationOptions(new ValidationOptions()
+                        .validator(new BsonDocument("$jsonSchema", BsonDocument.parse("{"
+                                + "  properties: {"
+                                + "    encryptedField: {"
+                                + "      encrypt: {"
+                                + "        keyId: [{"
+                                + "          \"$binary\": {"
+                                + "            \"base64\": \"" + base64DataKeyId + "\","
+                                + "            \"subType\": \"04\""
+                                + "          }"
+                                + "        }],"
+                                + "        bsonType: \"string\","
+                                + "        algorithm: \"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic\""
+                                + "      }"
+                                + "    }"
+                                + "  },"
+                                + "  \"bsonType\": \"object\""
+                                + "}"))));
+
+                MongoCollection<Document> collection = mongoClient.getDatabase(dbName).getCollection(collName);
+                collection.drop();
+
+                mongoClient.getDatabase(dbName).createCollection(collName, createCollectionOptions);
+
+                commandListener.reset();
+                collection.insertOne(new Document("encryptedField", "123456789"));
+
+                List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+                assertTimeoutIsDecreasingForCommands(Arrays.asList("listCollections", "find", "insert"), commandStartedEvents,
+                        initialTimeoutMS);
+            }
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @ParameterizedTest
+    @ValueSource(strings = {"insert", "create"})
+    void shouldThrowTimeoutExceptionWhenCreateEncryptedCollection(final String commandToTimeout) {
+        assumeTrue(serverVersionAtLeast(7, 0));
+        //given
+        long rtt = ClusterFixture.getPrimaryRTT();
+        long initialTimeoutMS = rtt + applyTimeoutMultiplierForServerless(200);
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder()
+                .timeout(initialTimeoutMS, MILLISECONDS))) {
+            final String dbName = "test";
+            final String collName = "coll";
+
+            try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                    .timeout(initialTimeoutMS, MILLISECONDS))) {
+                CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions().encryptedFields(Document.parse(
+                        "{"
+                                + "  fields: [{"
+                                + "    path: 'ssn',"
+                                + "    bsonType: 'string',"
+                                + "    keyId: null"
+                                + "  }]"
+                                + "}"));
+
+                keyVaultCollectionHelper.runAdminCommand("{"
+                        + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                        + "  mode: { times: 1 },"
+                        + "  data: {"
+                        + "    failCommands: [\"" + commandToTimeout + "\"],"
+                        + "    blockConnection: true,"
+                        + "    blockTimeMS: " + initialTimeoutMS
+                        + "  }"
+                        + "}");
+
+                MongoDatabase database = mongoClient.getDatabase(dbName);
+                database.getCollection(collName).drop();
+                commandListener.reset();
+
+                //when
+                MongoUpdatedEncryptedFieldsException encryptionException = assertThrows(MongoUpdatedEncryptedFieldsException.class, () ->
+                        clientEncryption.createEncryptedCollection(database, collName, createCollectionOptions,
+                                new CreateEncryptedCollectionParams("local")));
+                //then
+                assertInstanceOf(MongoOperationTimeoutException.class, encryptionException.getCause());
+            }
+        }
+    }
+
+    private static void assertTimeoutIsDecreasingForCommands(final List<String> commandNames,
+                                                             final List<CommandStartedEvent> commandStartedEvents,
+                                                             final long initialTimeoutMs) {
+        long previousMaxTimeMS = initialTimeoutMs;
+        assertEquals(commandNames.size(), commandStartedEvents.size(), "There have been more commands than expected");
+        for (int i = 0; i < commandStartedEvents.size(); i++) {
+            CommandStartedEvent commandStartedEvent = commandStartedEvents.get(i);
+            String expectedCommandName = commandNames.get(i);
+            assertEquals(expectedCommandName, commandStartedEvent.getCommandName());
+
+            BsonDocument command = commandStartedEvent.getCommand();
+            assertTrue(command.containsKey("maxTimeMS"), "Command " + expectedCommandName + " should have maxTimeMS set");
+
+            long maxTimeMS = command.getInt64("maxTimeMS").getValue();
+
+            if (i > 0) {
+                assertThat(commandStartedEvent.getCommandName() + " " + "maxTimeMS should be less than that of a previous "
+                        + commandStartedEvents.get(i - 1).getCommandName() + " command", maxTimeMS, lessThan(previousMaxTimeMS));
+            } else {
+                assertThat("maxTimeMS should be less than the configured timeout " + initialTimeoutMs + "ms",
+                        maxTimeMS, lessThan(previousMaxTimeMS));
+            }
+            previousMaxTimeMS = maxTimeMS;
+        }
+    }
+
+    protected ClientEncryptionSettings.Builder getClientEncryptionSettingsBuilder(final long vaultTimeout) {
+        return ClientEncryptionSettings
+                .builder()
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .keyVaultMongoClientSettings(getMongoClientSettingsBuilder()
+                        .timeout(vaultTimeout, TimeUnit.MILLISECONDS).build())
+                .kmsProviders(KMS_PROVIDERS);
+    }
+
+    protected ClientEncryptionSettings.Builder getClientEncryptionSettingsBuilder() {
+        return ClientEncryptionSettings
+                .builder()
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build())
+                .kmsProviders(KMS_PROVIDERS);
+    }
+
+    protected MongoClientSettings.Builder getMongoClientSettingsBuilder() {
+        return Fixture.getMongoClientSettingsBuilder()
+                .readConcern(ReadConcern.MAJORITY)
+                .writeConcern(WriteConcern.MAJORITY)
+                .readPreference(ReadPreference.primary())
+                .addCommandListener(commandListener);
+    }
+
+    @BeforeEach
+    public void setUp() {
+        Map<String, Object> localProviderMap = new HashMap<>();
+        localProviderMap.put("key", Base64.getDecoder().decode(MASTER_KEY));
+        KMS_PROVIDERS.put("local", localProviderMap);
+
+        keyVaultCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), keyVaultNamespace);
+        keyVaultCollectionHelper.create();
+        commandListener = new TestCommandListener();
+    }
+
+    @AfterEach
+    public void tearDown() {
+        ClusterFixture.disableFailPoint(FAIL_COMMAND_NAME);
+        if (keyVaultCollectionHelper != null) {
+            keyVaultCollectionHelper.drop();
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java
new file mode 100644
index 00000000000..25a1102914a
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.csot;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideOperationsEncryptionTimeoutProseTest extends AbstractClientSideOperationsEncryptionTimeoutProseTest {
+    @Override
+    public ClientEncryption createClientEncryption(final ClientEncryptionSettings.Builder builder) {
+        return ClientEncryptions.create(builder.build());
+    }
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings.Builder builder) {
+        return MongoClients.create(builder.build());
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java
index f3aef9ec257..1890b2e48a3 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java
@@ -27,9 +27,9 @@
 import com.mongodb.ServerApi;
 import com.mongodb.ServerApiVersion;
 import com.mongodb.TransactionOptions;
-import com.mongodb.WriteConcern;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCluster;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.MongoCursor;
 import com.mongodb.client.MongoDatabase;
@@ -64,6 +64,7 @@
 import com.mongodb.internal.connection.TestServerListener;
 import com.mongodb.internal.logging.LogMessage;
 import com.mongodb.lang.NonNull;
+import com.mongodb.lang.Nullable;
 import com.mongodb.logging.TestLoggingInterceptor;
 import org.bson.BsonArray;
 import org.bson.BsonBoolean;
@@ -261,6 +262,18 @@ public MongoCollection<BsonDocument> getCollection(final String id) {
         return getEntity(id, collections, "collection");
     }
 
+    public MongoCluster getMongoClusterWithTimeoutMS(final String id, @Nullable final Long timeoutMS) {
+        return timeoutMS != null ? getClient(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getClient(id);
+    }
+
+    public MongoDatabase getDatabaseWithTimeoutMS(final String id, @Nullable final Long timeoutMS) {
+        return timeoutMS != null ? getDatabase(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getDatabase(id);
+    }
+
+    public MongoCollection<BsonDocument> getCollectionWithTimeoutMS(final String id, @Nullable final Long timeoutMS) {
+        return timeoutMS != null ? getCollection(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getCollection(id);
+    }
+
     public ClientSession getSession(final String id) {
         return getEntity(id, sessions, "session");
     }
@@ -471,11 +484,17 @@ private void initClient(final BsonDocument entity, final String id,
                         break;
                     case "w":
                         if (value.isString()) {
-                            clientSettingsBuilder.writeConcern(new WriteConcern(value.asString().getValue()));
+                            clientSettingsBuilder.writeConcern(clientSettingsBuilder.build()
+                                    .getWriteConcern().withW(value.asString().getValue()));
                         } else {
-                            clientSettingsBuilder.writeConcern(new WriteConcern(value.asInt32().intValue()));
+                            clientSettingsBuilder.writeConcern(clientSettingsBuilder.build()
+                                    .getWriteConcern().withW(value.asInt32().intValue()));
                         }
                         break;
+                    case "wTimeoutMS":
+                        clientSettingsBuilder.writeConcern(clientSettingsBuilder.build().getWriteConcern()
+                                .withWTimeout(value.asNumber().longValue(), TimeUnit.MILLISECONDS));
+                        break;
                     case "maxPoolSize":
                         clientSettingsBuilder.applyToConnectionPoolSettings(builder -> builder.maxSize(value.asNumber().intValue()));
                         break;
@@ -519,6 +538,9 @@ private void initClient(final BsonDocument entity, final String id,
                     case "appName":
                         clientSettingsBuilder.applicationName(value.asString().getValue());
                         break;
+                    case "timeoutMS":
+                        clientSettingsBuilder.timeout(value.asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
                     case "serverMonitoringMode":
                         clientSettingsBuilder.applyToServerSettings(builder -> builder.serverMonitoringMode(
                                 ServerMonitoringModeUtil.fromString(value.asString().getValue())));
@@ -631,6 +653,9 @@ private void initDatabase(final BsonDocument entity, final String id) {
                     case "writeConcern":
                         database = database.withWriteConcern(asWriteConcern(entry.getValue().asDocument()));
                         break;
+                    case "timeoutMS":
+                        database = database.withTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
                     default:
                         throw new UnsupportedOperationException("Unsupported database option: " + entry.getKey());
                 }
@@ -655,6 +680,9 @@ private void initCollection(final BsonDocument entity, final String id) {
                     case "writeConcern":
                         collection = collection.withWriteConcern(asWriteConcern(entry.getValue().asDocument()));
                         break;
+                    case "timeoutMS":
+                        collection = collection.withTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
                     default:
                         throw new UnsupportedOperationException("Unsupported collection option: " + entry.getKey());
                 }
@@ -675,6 +703,9 @@ private void initSession(final BsonDocument entity, final String id, final BsonD
                     case "snapshot":
                         optionsBuilder.snapshot(entry.getValue().asBoolean().getValue());
                         break;
+                    case "defaultTimeoutMS":
+                        optionsBuilder.defaultTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
                     case "causalConsistency":
                         optionsBuilder.causallyConsistent(entry.getValue().asBoolean().getValue());
                         break;
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java
index 7c0d340a9ad..75d264487f8 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java
@@ -22,6 +22,7 @@
 import com.mongodb.MongoException;
 import com.mongodb.MongoSecurityException;
 import com.mongodb.MongoExecutionTimeoutException;
+import com.mongodb.MongoOperationTimeoutException;
 import com.mongodb.MongoServerException;
 import com.mongodb.MongoSocketException;
 import com.mongodb.MongoWriteConcernException;
@@ -42,7 +43,7 @@
 final class ErrorMatcher {
     private static final Set<String> EXPECTED_ERROR_FIELDS = new HashSet<>(
             asList("isError", "expectError", "isClientError", "errorCode", "errorCodeName", "errorContains", "errorResponse",
-                    "isClientError", "errorLabelsOmit", "errorLabelsContain", "expectResult"));
+                    "isTimeoutError", "errorLabelsOmit", "errorLabelsContain", "expectResult"));
 
     private final AssertionContext context;
     private final ValueMatcher valueMatcher;
@@ -68,6 +69,14 @@ void assertErrorsMatch(final BsonDocument expectedError, final Exception e) {
                     e instanceof MongoClientException || e instanceof IllegalArgumentException || e instanceof IllegalStateException
                             || e instanceof MongoSocketException);
         }
+
+        if (expectedError.containsKey("isTimeoutError")) {
+            assertEquals(context.getMessage("Exception must be of type MongoOperationTimeoutException when checking for results"),
+                    expectedError.getBoolean("isTimeoutError").getValue(),
+                    e instanceof MongoOperationTimeoutException
+            );
+        }
+
         if (expectedError.containsKey("errorContains")) {
             String errorContains = expectedError.getString("errorContains").getValue();
             assertTrue(context.getMessage("Error message does not contain expected string: " + errorContains),
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java
index 63e07ca2fb2..67f95903997 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java
@@ -16,6 +16,7 @@
 
 package com.mongodb.client.unified;
 
+import com.mongodb.CursorType;
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadConcernLevel;
@@ -37,11 +38,12 @@
 import com.mongodb.client.ListIndexesIterable;
 import com.mongodb.client.ListSearchIndexesIterable;
 import com.mongodb.client.MongoChangeStreamCursor;
-import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCluster;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.MongoCursor;
 import com.mongodb.client.MongoDatabase;
 import com.mongodb.client.MongoIterable;
+import com.mongodb.client.cursor.TimeoutMode;
 import com.mongodb.client.model.BulkWriteOptions;
 import com.mongodb.client.model.ChangeStreamPreAndPostImagesOptions;
 import com.mongodb.client.model.ClusteredIndexOptions;
@@ -52,6 +54,7 @@
 import com.mongodb.client.model.DeleteManyModel;
 import com.mongodb.client.model.DeleteOneModel;
 import com.mongodb.client.model.DeleteOptions;
+import com.mongodb.client.model.DropIndexOptions;
 import com.mongodb.client.model.EstimatedDocumentCountOptions;
 import com.mongodb.client.model.FindOneAndDeleteOptions;
 import com.mongodb.client.model.FindOneAndReplaceOptions;
@@ -93,6 +96,8 @@
 import org.bson.codecs.ValueCodecProvider;
 import org.bson.codecs.configuration.CodecRegistries;
 
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -106,7 +111,8 @@
 import static java.util.Objects.requireNonNull;
 import static java.util.stream.Collectors.toList;
 
-final class UnifiedCrudHelper {
+@SuppressWarnings("deprecation")
+final class UnifiedCrudHelper extends UnifiedHelper {
     private final Entities entities;
     private final String testDescription;
     private final AtomicInteger uniqueIdGenerator = new AtomicInteger();
@@ -217,13 +223,13 @@ private ClientSession getSession(final BsonDocument arguments) {
 
 
     OperationResult executeListDatabases(final BsonDocument operation) {
-        MongoClient client = entities.getClient(operation.getString("object").getValue());
+        MongoCluster mongoCluster = getMongoCluster(operation);
 
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         ListDatabasesIterable<BsonDocument> iterable = session == null
-                ? client.listDatabases(BsonDocument.class)
-                : client.listDatabases(session, BsonDocument.class);
+                ? mongoCluster.listDatabases(BsonDocument.class)
+                : mongoCluster.listDatabases(session, BsonDocument.class);
 
         for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
             switch (cur.getKey()) {
@@ -242,13 +248,13 @@ OperationResult executeListDatabases(final BsonDocument operation) {
     }
 
     OperationResult executeListDatabaseNames(final BsonDocument operation) {
-        MongoClient client = entities.getClient(operation.getString("object").getValue());
+        MongoCluster mongoCluster = getMongoCluster(operation);
 
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         MongoIterable<String> iterable = session == null
-                ? client.listDatabaseNames()
-                : client.listDatabaseNames(session);
+                ? mongoCluster.listDatabaseNames()
+                : mongoCluster.listDatabaseNames(session);
 
         for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
             //noinspection SwitchStatementWithTooFewBranches
@@ -265,34 +271,40 @@ OperationResult executeListDatabaseNames(final BsonDocument operation) {
     }
 
     OperationResult executeListCollections(final BsonDocument operation) {
-        MongoDatabase database = entities.getDatabase(operation.getString("object").getValue());
-
+        MongoDatabase database = getMongoDatabase(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         ListCollectionsIterable<BsonDocument> iterable = session == null
                 ? database.listCollections(BsonDocument.class)
                 : database.listCollections(session, BsonDocument.class);
-        for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
-            switch (cur.getKey()) {
-                case "session":
-                    break;
-                case "filter":
-                    iterable.filter(cur.getValue().asDocument());
-                    break;
-                case "batchSize":
-                    iterable.batchSize(cur.getValue().asNumber().intValue());
-                    break;
-                default:
-                    throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+        return resultOf(() -> {
+            for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
+                switch (cur.getKey()) {
+                    case "session":
+                        break;
+                    case "filter":
+                        iterable.filter(cur.getValue().asDocument());
+                        break;
+                    case "batchSize":
+                        iterable.batchSize(cur.getValue().asNumber().intValue());
+                        break;
+                    case "timeoutMode":
+                        setTimeoutMode(iterable, cur);
+                        break;
+                    case "maxTimeMS":
+                        iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
+                    default:
+                        throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+                }
             }
-        }
 
-        return resultOf(() ->
-                new BsonArray(iterable.into(new ArrayList<>())));
+            return new BsonArray(iterable.into(new ArrayList<>()));
+        });
     }
 
     OperationResult executeListCollectionNames(final BsonDocument operation) {
-        MongoDatabase database = entities.getDatabase(operation.getString("object").getValue());
+        MongoDatabase database = getMongoDatabase(operation);
 
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
@@ -322,21 +334,21 @@ OperationResult executeListCollectionNames(final BsonDocument operation) {
     }
 
     OperationResult executeListIndexes(final BsonDocument operation) {
-        ListIndexesIterable<BsonDocument> iterable = createListIndexesIterable(operation);
-
-        return resultOf(() ->
-                new BsonArray(iterable.into(new ArrayList<>())));
+        return resultOf(() -> {
+            ListIndexesIterable<BsonDocument> iterable = createListIndexesIterable(operation);
+            return new BsonArray(iterable.into(new ArrayList<>()));
+        });
     }
 
     OperationResult executeListIndexNames(final BsonDocument operation) {
-        ListIndexesIterable<BsonDocument> iterable = createListIndexesIterable(operation);
-
-        return resultOf(() ->
-                new BsonArray(iterable.into(new ArrayList<>()).stream().map(document -> document.getString("name")).collect(toList())));
+        return resultOf(() -> {
+            ListIndexesIterable<BsonDocument> iterable = createListIndexesIterable(operation);
+            return new BsonArray(iterable.into(new ArrayList<>()).stream().map(document -> document.getString("name")).collect(toList()));
+        });
     }
 
     private ListIndexesIterable<BsonDocument> createListIndexesIterable(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         ListIndexesIterable<BsonDocument> iterable = session == null
@@ -349,6 +361,12 @@ private ListIndexesIterable<BsonDocument> createListIndexesIterable(final BsonDo
                 case "batchSize":
                     iterable.batchSize(cur.getValue().asNumber().intValue());
                     break;
+                case "timeoutMode":
+                    setTimeoutMode(iterable, cur);
+                    break;
+                case "maxTimeMS":
+                    iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 default:
                     throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
             }
@@ -357,19 +375,19 @@ private ListIndexesIterable<BsonDocument> createListIndexesIterable(final BsonDo
     }
 
     OperationResult executeFind(final BsonDocument operation) {
-        FindIterable<BsonDocument> iterable = createFindIterable(operation);
-        return resultOf(() ->
-                new BsonArray(iterable.into(new ArrayList<>())));
+        return resultOf(() -> {
+            FindIterable<BsonDocument> iterable = createFindIterable(operation);
+            return new BsonArray(iterable.into(new ArrayList<>()));
+        });
     }
 
     OperationResult executeFindOne(final BsonDocument operation) {
-        FindIterable<BsonDocument> iterable = createFindIterable(operation);
-        return resultOf(iterable::first);
+        return resultOf(() ->  createFindIterable(operation).first());
     }
 
     OperationResult createFindCursor(final BsonDocument operation) {
-        FindIterable<BsonDocument> iterable = createFindIterable(operation);
         return resultOf(() -> {
+            FindIterable<BsonDocument> iterable = createFindIterable(operation);
             entities.addCursor(operation.getString("saveResultAsEntity", new BsonString(createRandomEntityId())).getValue(),
                     iterable.cursor());
             return null;
@@ -378,7 +396,7 @@ OperationResult createFindCursor(final BsonDocument operation) {
 
     @NonNull
     private FindIterable<BsonDocument> createFindIterable(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         BsonDocument filter = arguments.getDocument("filter");
@@ -400,6 +418,9 @@ private FindIterable<BsonDocument> createFindIterable(final BsonDocument operati
                 case "maxTimeMS":
                     iterable.maxTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS);
                     break;
+                case "maxAwaitTimeMS":
+                    iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 case "skip":
                     iterable.skip(cur.getValue().asInt32().intValue());
                     break;
@@ -437,6 +458,12 @@ private FindIterable<BsonDocument> createFindIterable(final BsonDocument operati
                 case "showRecordId":
                     iterable.showRecordId(cur.getValue().asBoolean().getValue());
                     break;
+                case "cursorType":
+                    setCursorType(iterable, cur);
+                    break;
+                case "timeoutMode":
+                    setTimeoutMode(iterable, cur);
+                    break;
                 default:
                     throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
             }
@@ -444,8 +471,9 @@ private FindIterable<BsonDocument> createFindIterable(final BsonDocument operati
         return iterable;
     }
 
+    @SuppressWarnings("deprecation") //maxTimeMS
     OperationResult executeDistinct(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
 
@@ -465,6 +493,9 @@ OperationResult executeDistinct(final BsonDocument operation) {
                 case "filter":
                     iterable.filter(cur.getValue().asDocument());
                     break;
+                case "maxTimeMS":
+                    iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 case "collation":
                     iterable.collation(asCollation(cur.getValue().asDocument()));
                     break;
@@ -479,8 +510,8 @@ OperationResult executeDistinct(final BsonDocument operation) {
 
     @SuppressWarnings("deprecation")
     OperationResult executeMapReduce(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
-        BsonDocument arguments = operation.getDocument("arguments");
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
+        BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
 
         String mapFunction = arguments.get("map").asJavaScript().getCode();
@@ -509,8 +540,9 @@ OperationResult executeMapReduce(final BsonDocument operation) {
                 new BsonArray(iterable.into(new ArrayList<>())));
     }
 
+    @SuppressWarnings("deprecation") //maxTimeMS
     OperationResult executeFindOneAndUpdate(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
 
         BsonDocument filter = arguments.getDocument("filter").asDocument();
@@ -558,6 +590,9 @@ OperationResult executeFindOneAndUpdate(final BsonDocument operation) {
                 case "let":
                     options.let(cur.getValue().asDocument());
                     break;
+                case "maxTimeMS":
+                    options.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 case "collation":
                     options.collation(asCollation(cur.getValue().asDocument()));
                     break;
@@ -585,8 +620,9 @@ OperationResult executeFindOneAndUpdate(final BsonDocument operation) {
         });
     }
 
+    @SuppressWarnings("deprecation")
     OperationResult executeFindOneAndReplace(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         BsonDocument filter = arguments.getDocument("filter").asDocument();
@@ -633,6 +669,9 @@ OperationResult executeFindOneAndReplace(final BsonDocument operation) {
                 case "let":
                     options.let(cur.getValue().asDocument());
                     break;
+                case "maxTimeMS":
+                    options.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 case "collation":
                     options.collation(asCollation(cur.getValue().asDocument()));
                     break;
@@ -650,8 +689,9 @@ OperationResult executeFindOneAndReplace(final BsonDocument operation) {
         });
     }
 
+    @SuppressWarnings("deprecation") //maxTimeMS
     OperationResult executeFindOneAndDelete(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         BsonDocument filter = arguments.getDocument("filter").asDocument();
@@ -684,6 +724,9 @@ OperationResult executeFindOneAndDelete(final BsonDocument operation) {
                 case "let":
                     options.let(cur.getValue().asDocument());
                     break;
+                case "maxTimeMS":
+                    options.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 default:
                     throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
             }
@@ -700,53 +743,61 @@ OperationResult executeFindOneAndDelete(final BsonDocument operation) {
 
     OperationResult executeAggregate(final BsonDocument operation) {
         String entityName = operation.getString("object").getValue();
-
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         List<BsonDocument> pipeline = arguments.getArray("pipeline").stream().map(BsonValue::asDocument).collect(toList());
         AggregateIterable<BsonDocument> iterable;
         if (entities.hasDatabase(entityName)) {
+            Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments"));
+            MongoDatabase database = entities.getDatabaseWithTimeoutMS(entityName, timeoutMS);
             iterable = session == null
-                    ? entities.getDatabase(entityName).aggregate(requireNonNull(pipeline), BsonDocument.class)
-                    : entities.getDatabase(entityName).aggregate(session, requireNonNull(pipeline), BsonDocument.class);
+                    ? database.aggregate(requireNonNull(pipeline), BsonDocument.class)
+                    : database.aggregate(session, requireNonNull(pipeline), BsonDocument.class);
         } else if (entities.hasCollection(entityName)) {
+            Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments"));
+            MongoCollection<BsonDocument> collection = entities.getCollectionWithTimeoutMS(entityName, timeoutMS);
             iterable = session == null
-                    ? entities.getCollection(entityName).aggregate(requireNonNull(pipeline))
-                    : entities.getCollection(entityName).aggregate(session, requireNonNull(pipeline));
+                    ? collection.aggregate(requireNonNull(pipeline))
+                    : collection.aggregate(session, requireNonNull(pipeline));
         } else {
             throw new UnsupportedOperationException("Unsupported entity type with name: " + entityName);
         }
-        for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
-            switch (cur.getKey()) {
-                case "pipeline":
-                case "session":
-                    break;
-                case "batchSize":
-                    iterable.batchSize(cur.getValue().asNumber().intValue());
-                    break;
-                case "allowDiskUse":
-                    iterable.allowDiskUse(cur.getValue().asBoolean().getValue());
-                    break;
-                case "let":
-                    iterable.let(cur.getValue().asDocument());
-                    break;
-                case "comment":
-                    iterable.comment(cur.getValue());
-                    break;
-                case "maxTimeMS":
-                    iterable.maxTime(cur.getValue().asNumber().intValue(), TimeUnit.MILLISECONDS);
-                    break;
-                case "collation":
-                    iterable.collation(asCollation(cur.getValue().asDocument()));
-                    break;
-                default:
-                    throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
-            }
-        }
-        String lastStageName = pipeline.isEmpty() ? null : pipeline.get(pipeline.size() - 1).getFirstKey();
-        boolean useToCollection = Objects.equals(lastStageName, "$out") || Objects.equals(lastStageName, "$merge");
-
         return resultOf(() -> {
+            for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
+                switch (cur.getKey()) {
+                    case "pipeline":
+                    case "session":
+                        break;
+                    case "batchSize":
+                        iterable.batchSize(cur.getValue().asNumber().intValue());
+                        break;
+                    case "allowDiskUse":
+                        iterable.allowDiskUse(cur.getValue().asBoolean().getValue());
+                        break;
+                    case "let":
+                        iterable.let(cur.getValue().asDocument());
+                        break;
+                    case "collation":
+                        iterable.collation(asCollation(cur.getValue().asDocument()));
+                        break;
+                    case "comment":
+                        iterable.comment(cur.getValue());
+                        break;
+                    case "timeoutMode":
+                        setTimeoutMode(iterable, cur);
+                        break;
+                    case "maxTimeMS":
+                        iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
+                    case "maxAwaitTimeMS":
+                        iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
+                    default:
+                        throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+                }
+            }
+            String lastStageName = pipeline.isEmpty() ? null : pipeline.get(pipeline.size() - 1).getFirstKey();
+            boolean useToCollection = Objects.equals(lastStageName, "$out") || Objects.equals(lastStageName, "$merge");
             if (!pipeline.isEmpty() && useToCollection) {
                 iterable.toCollection();
                 return null;
@@ -757,7 +808,7 @@ OperationResult executeAggregate(final BsonDocument operation) {
     }
 
     OperationResult executeDeleteOne(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonDocument filter = arguments.getDocument("filter");
         ClientSession session = getSession(arguments);
@@ -773,7 +824,7 @@ OperationResult executeDeleteOne(final BsonDocument operation) {
     }
 
     OperationResult executeDeleteMany(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonDocument filter = arguments.getDocument("filter");
         ClientSession session = getSession(arguments);
@@ -797,7 +848,7 @@ private BsonDocument toExpected(final DeleteResult result) {
     }
 
     OperationResult executeUpdateOne(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         BsonDocument filter = arguments.getDocument("filter");
@@ -821,7 +872,7 @@ OperationResult executeUpdateOne(final BsonDocument operation) {
     }
 
     OperationResult executeUpdateMany(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonDocument filter = arguments.getDocument("filter");
         BsonValue update = arguments.get("update");
@@ -844,7 +895,7 @@ OperationResult executeUpdateMany(final BsonDocument operation) {
     }
 
     OperationResult executeReplaceOne(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         BsonDocument filter = arguments.getDocument("filter");
@@ -877,7 +928,7 @@ private BsonDocument toExpected(final UpdateResult result) {
 
 
     OperationResult executeInsertOne(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         BsonDocument document = arguments.getDocument("document").asDocument();
@@ -911,7 +962,7 @@ private BsonDocument toExpected(final InsertOneResult result) {
     }
 
     OperationResult executeInsertMany(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         List<BsonDocument> documents = arguments.getArray("documents").stream().map(BsonValue::asDocument).collect(toList());
         ClientSession session = getSession(arguments);
@@ -952,7 +1003,7 @@ private BsonDocument toExpected(final InsertManyResult result) {
     }
 
     OperationResult executeBulkWrite(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         List<WriteModel<BsonDocument>> requests = arguments.getArray("requests").stream()
@@ -1156,6 +1207,9 @@ OperationResult executeStartTransaction(final BsonDocument operation) {
                 case "readConcern":
                     optionsBuilder.readConcern(asReadConcern(cur.getValue().asDocument()));
                     break;
+                case "timeoutMS":
+                    optionsBuilder.timeout(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 case "maxCommitTimeMS":
                     optionsBuilder.maxCommitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
                     break;
@@ -1174,7 +1228,7 @@ OperationResult executeCommitTransaction(final BsonDocument operation) {
         ClientSession session = entities.getSession(operation.getString("object").getValue());
 
         if (operation.containsKey("arguments")) {
-            throw new UnsupportedOperationException("Unexpected arguments");
+            throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments"));
         }
 
         return resultOf(() -> {
@@ -1187,7 +1241,7 @@ OperationResult executeAbortTransaction(final BsonDocument operation) {
         ClientSession session = entities.getSession(operation.getString("object").getValue());
 
         if (operation.containsKey("arguments")) {
-            throw new UnsupportedOperationException("Unexpected arguments");
+            throw new UnsupportedOperationException("Unexpected arguments: " + operation.get("arguments"));
         }
 
         return resultOf(() -> {
@@ -1210,6 +1264,9 @@ OperationResult executeWithTransaction(final BsonDocument operation, final Opera
                 case "writeConcern":
                     optionsBuilder.writeConcern(asWriteConcern(entry.getValue().asDocument()));
                     break;
+                case "timeoutMS":
+                    optionsBuilder.timeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                    break;
                 case "maxCommitTimeMS":
                     optionsBuilder.maxCommitTime(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
                     break;
@@ -1232,12 +1289,12 @@ OperationResult executeWithTransaction(final BsonDocument operation, final Opera
     }
 
     public OperationResult executeDropCollection(final BsonDocument operation) {
-        MongoDatabase database = entities.getDatabase(operation.getString("object").getValue());
+        MongoDatabase database = getMongoDatabase(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         String collectionName = arguments.getString("collection").getValue();
 
-        if (operation.getDocument("arguments", new BsonDocument()).size() > 1) {
-            throw new UnsupportedOperationException("Unexpected arguments");
+        if (operation.getDocument("arguments").size() > 1) {
+            throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments"));
         }
 
         return resultOf(() -> {
@@ -1247,7 +1304,7 @@ public OperationResult executeDropCollection(final BsonDocument operation) {
     }
 
     public OperationResult executeCreateCollection(final BsonDocument operation) {
-        MongoDatabase database = entities.getDatabase(operation.getString("object").getValue());
+        MongoDatabase database = getMongoDatabase(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         String collectionName = arguments.getString("collection").getValue();
         ClientSession session = getSession(arguments);
@@ -1317,7 +1374,7 @@ public OperationResult executeCreateCollection(final BsonDocument operation) {
     }
 
     public OperationResult executeModifyCollection(final BsonDocument operation) {
-        MongoDatabase database = entities.getDatabase(operation.getString("object").getValue());
+        MongoDatabase database = getMongoDatabase(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         String collectionName = arguments.getString("collection").getValue();
         ClientSession session = getSession(arguments);
@@ -1350,7 +1407,7 @@ public OperationResult executeModifyCollection(final BsonDocument operation) {
     }
 
     public OperationResult executeRenameCollection(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         String newCollectionName = arguments.getString("to").getValue();
         ClientSession session = getSession(arguments);
@@ -1448,7 +1505,7 @@ private TimeSeriesGranularity createTimeSeriesGranularity(final String value) {
 
 
     OperationResult executeCreateSearchIndex(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonDocument model = arguments.getDocument("model");
         BsonDocument definition = model.getDocument("definition");
@@ -1465,7 +1522,7 @@ OperationResult executeCreateSearchIndex(final BsonDocument operation) {
     }
 
     OperationResult executeCreateSearchIndexes(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonArray models = arguments.getArray("models");
 
@@ -1480,7 +1537,7 @@ OperationResult executeCreateSearchIndexes(final BsonDocument operation) {
 
 
     OperationResult executeUpdateSearchIndex(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonDocument definition = arguments.getDocument("definition");
         String name = arguments.getString("name").getValue();
@@ -1492,7 +1549,7 @@ OperationResult executeUpdateSearchIndex(final BsonDocument operation) {
     }
 
     OperationResult executeDropSearchIndex(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         String name = arguments.getString("name").getValue();
 
@@ -1516,7 +1573,7 @@ private static SearchIndexModel toIndexSearchModel(final BsonValue bsonValue) {
 
 
     OperationResult executeListSearchIndexes(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         Optional<BsonDocument> arguments = Optional.ofNullable(operation.getOrDefault("arguments", null)).map(BsonValue::asDocument);
 
         if (arguments.isPresent()) {
@@ -1555,7 +1612,7 @@ private ListSearchIndexesIterable<BsonDocument> createListSearchIndexesIterable(
     }
 
     public OperationResult executeCreateIndex(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonDocument keys = arguments.getDocument("keys").asDocument();
         ClientSession session = getSession(arguments);
@@ -1588,27 +1645,63 @@ public OperationResult executeCreateIndex(final BsonDocument operation) {
     }
 
     public OperationResult executeDropIndex(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
-        String indexName = arguments.get("name").asString().getValue();
+        // Validate "name" before dereferencing it; checking after the get() raised NPE instead.
+        if (!arguments.containsKey("name")) {
+            throw new UnsupportedOperationException("Drop index without name is not supported");
+        }
+        String indexName = arguments.get("name").asString().getValue();
+
+        DropIndexOptions options = getDropIndexOptions(arguments);
+        return resultOf(() -> {
+            if (session == null) {
+                collection.dropIndex(indexName, options);
+            } else {
+                collection.dropIndex(session, indexName, options);
+            }
+            return null;
+        });
+    }
+
+    public OperationResult executeDropIndexes(final BsonDocument operation) {
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
+
+        if (operation.containsKey("arguments")) {
+            BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+            ClientSession session = getSession(arguments);
+            DropIndexOptions options = getDropIndexOptions(arguments);
+            return resultOf(() -> {
+                if (session == null) {
+                    collection.dropIndexes(options);
+                } else {
+                    collection.dropIndexes(session, options);
+                }
+                return null;
+            });
+        }
+        return resultOf(() -> {
+            collection.dropIndexes();
+            return null;
+        });
+    }
+
+    private static DropIndexOptions getDropIndexOptions(final BsonDocument arguments) {
+        DropIndexOptions options = new DropIndexOptions();
         for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
             switch (cur.getKey()) {
                 case "session":
                 case "name":
                     break;
+                case "maxTimeMS":
+                    options.maxTime(cur.getValue().asNumber().intValue(), TimeUnit.MILLISECONDS);
+                    break;
                 default:
                     throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
             }
         }
-        return resultOf(() -> {
-            if (session == null) {
-                collection.dropIndex(indexName);
-            } else {
-                collection.dropIndex(session, indexName);
-            }
-            return null;
-        });
+        return options;
     }
 
     public OperationResult createChangeStreamCursor(final BsonDocument operation) {
@@ -1616,43 +1709,48 @@ public OperationResult createChangeStreamCursor(final BsonDocument operation) {
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         List<BsonDocument> pipeline = arguments.getArray("pipeline").stream().map(BsonValue::asDocument).collect(toList());
         ChangeStreamIterable<BsonDocument> iterable;
+
+        Long timeoutMS = arguments.containsKey("timeoutMS") ? arguments.remove("timeoutMS").asNumber().longValue() : null;
         if (entities.hasCollection(entityName)) {
-            iterable = entities.getCollection(entityName).watch(pipeline);
+            iterable = entities.getCollectionWithTimeoutMS(entityName, timeoutMS).watch(pipeline);
         } else if (entities.hasDatabase(entityName)) {
-            iterable = entities.getDatabase(entityName).watch(pipeline, BsonDocument.class);
+            iterable = entities.getDatabaseWithTimeoutMS(entityName, timeoutMS).watch(pipeline, BsonDocument.class);
         } else if (entities.hasClient(entityName)) {
-            iterable = entities.getClient(entityName).watch(pipeline, BsonDocument.class);
+            iterable = entities.getMongoClusterWithTimeoutMS(entityName, timeoutMS).watch(pipeline, BsonDocument.class);
         } else {
             throw new UnsupportedOperationException("No entity found for id: " + entityName);
         }
 
-        for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
-            switch (cur.getKey()) {
-                case "batchSize":
-                    iterable.batchSize(cur.getValue().asNumber().intValue());
-                    break;
-                case "pipeline":
-                    break;
-                case "comment":
-                    iterable.comment(cur.getValue());
-                    break;
-                case "fullDocument":
-                    iterable.fullDocument(FullDocument.fromString(cur.getValue().asString().getValue()));
-                    break;
-                case "fullDocumentBeforeChange":
-                    iterable.fullDocumentBeforeChange(FullDocumentBeforeChange.fromString(cur.getValue().asString().getValue()));
-                    break;
-                case "showExpandedEvents":
-                    iterable.showExpandedEvents(cur.getValue().asBoolean().getValue());
-                    break;
-                default:
-                    throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
-            }
-        }
-
         return resultOf(() -> {
+            for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
+                switch (cur.getKey()) {
+                    case "batchSize":
+                        iterable.batchSize(cur.getValue().asNumber().intValue());
+                        break;
+                    case "pipeline":
+                        break;
+                    case "comment":
+                        iterable.comment(cur.getValue());
+                        break;
+                    case "fullDocument":
+                        iterable.fullDocument(FullDocument.fromString(cur.getValue().asString().getValue()));
+                        break;
+                    case "fullDocumentBeforeChange":
+                        iterable.fullDocumentBeforeChange(FullDocumentBeforeChange.fromString(cur.getValue().asString().getValue()));
+                        break;
+                    case "maxAwaitTimeMS":
+                        iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS);
+                        break;
+                    case "showExpandedEvents":
+                        iterable.showExpandedEvents(cur.getValue().asBoolean().getValue());
+                        break;
+                    default:
+                        throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+                }
+            }
+            MongoCursor<BsonDocument> changeStreamWrappingCursor = createChangeStreamWrappingCursor(iterable);
             entities.addCursor(operation.getString("saveResultAsEntity",
-                    new BsonString(createRandomEntityId())).getValue(), createChangeStreamWrappingCursor(iterable));
+                    new BsonString(createRandomEntityId())).getValue(), changeStreamWrappingCursor);
             return null;
         });
     }
@@ -1662,12 +1760,24 @@ public OperationResult executeIterateUntilDocumentOrError(final BsonDocument ope
         MongoCursor<BsonDocument> cursor = entities.getCursor(id);
 
         if (operation.containsKey("arguments")) {
-            throw new UnsupportedOperationException("Unexpected arguments");
+            throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments"));
         }
 
         return resultOf(cursor::next);
     }
 
+
+    public OperationResult executeIterateOnce(final BsonDocument operation) {
+        String id = operation.getString("object").getValue();
+        MongoCursor<BsonDocument> cursor = entities.getCursor(id);
+
+        if (operation.containsKey("arguments")) {
+            throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments"));
+        }
+
+        return resultOf(cursor::tryNext);
+    }
+
     public OperationResult close(final BsonDocument operation) {
         String id = operation.getString("object").getValue();
 
@@ -1682,7 +1792,7 @@ public OperationResult close(final BsonDocument operation) {
     }
 
     public OperationResult executeRunCommand(final BsonDocument operation) {
-        MongoDatabase database = entities.getDatabase(operation.getString("object").getValue());
+        MongoDatabase database = getMongoDatabase(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         ClientSession session = getSession(arguments);
         BsonDocument command = arguments.getDocument("command");
@@ -1718,7 +1828,7 @@ public OperationResult executeRunCommand(final BsonDocument operation) {
     }
 
     public OperationResult executeCountDocuments(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
         BsonDocument filter = arguments.getDocument("filter");
         ClientSession session = getSession(arguments);
@@ -1756,7 +1866,7 @@ public OperationResult executeCountDocuments(final BsonDocument operation) {
     }
 
     public OperationResult executeEstimatedDocumentCount(final BsonDocument operation) {
-        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        MongoCollection<BsonDocument> collection = getMongoCollection(operation);
         BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
 
         EstimatedDocumentCountOptions options = new EstimatedDocumentCountOptions();
@@ -1851,4 +1961,85 @@ private BsonDocument encodeChangeStreamDocumentToBsonDocument(final ChangeStream
             };
         }
     }
+
+    private MongoCollection<BsonDocument> getMongoCollection(final BsonDocument operation) {
+        MongoCollection<BsonDocument> collection = entities.getCollection(operation.getString("object").getValue());
+        Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments", new BsonDocument()));
+        if (timeoutMS != null) {
+            collection = collection.withTimeout(timeoutMS, TimeUnit.MILLISECONDS);
+        }
+        return collection;
+    }
+    private MongoDatabase getMongoDatabase(final BsonDocument operation) {
+        MongoDatabase database = entities.getDatabase(operation.getString("object").getValue());
+        if (operation.containsKey("arguments")) {
+            BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+            Long timeoutMS = getAndRemoveTimeoutMS(arguments);
+            if (timeoutMS != null) {
+                database = database.withTimeout(timeoutMS, TimeUnit.MILLISECONDS);
+                arguments.remove("timeoutMS");
+            }
+        }
+        return database;
+    }
+
+    private MongoCluster getMongoCluster(final BsonDocument operation) {
+        MongoCluster mongoCluster = entities.getClient(operation.getString("object").getValue());
+        if (operation.containsKey("arguments")) {
+            BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+            Long timeoutMS = getAndRemoveTimeoutMS(arguments);
+            if (timeoutMS != null) {
+                mongoCluster = mongoCluster.withTimeout(timeoutMS, TimeUnit.MILLISECONDS);
+                arguments.remove("timeoutMS");
+            }
+        }
+        return mongoCluster;
+    }
+
+    private static void setCursorType(final FindIterable<BsonDocument> iterable, final Map.Entry<String, BsonValue> cur) {
+        switch (cur.getValue().asString().getValue()) {
+            case "tailable":
+                iterable.cursorType(CursorType.Tailable);
+                break;
+            case "nonTailable":
+                iterable.cursorType(CursorType.NonTailable);
+                break;
+            case "tailableAwait":
+                iterable.cursorType(CursorType.TailableAwait);
+                break;
+            default:
+                throw new UnsupportedOperationException("Unsupported cursorType: " + cur.getValue());
+        }
+    }
+
+    private static void setTimeoutMode(final MongoIterable<BsonDocument> iterable, final Map.Entry<String, BsonValue> cur) {
+        switch (cur.getValue().asString().getValue()) {
+            case "cursorLifetime":
+                invokeTimeoutMode(iterable, TimeoutMode.CURSOR_LIFETIME);
+                break;
+            case "iteration":
+                invokeTimeoutMode(iterable, TimeoutMode.ITERATION);
+                break;
+            default:
+                throw new UnsupportedOperationException("Unsupported timeoutMode: " + cur.getValue());
+        }
+    }
+
+    private static void invokeTimeoutMode(final MongoIterable<BsonDocument> iterable, final TimeoutMode timeoutMode) {
+        try {
+            Method timeoutModeMethod = iterable.getClass().getDeclaredMethod("timeoutMode", TimeoutMode.class);
+            timeoutModeMethod.setAccessible(true);
+            timeoutModeMethod.invoke(iterable, timeoutMode);
+        } catch (NoSuchMethodException e) {
+            throw new UnsupportedOperationException("Unsupported timeoutMode method for class: " + iterable.getClass(), e);
+        } catch (IllegalAccessException e) {
+            throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), e);
+        } catch (InvocationTargetException e) {
+            Throwable targetException = e.getTargetException();
+            if (targetException instanceof IllegalArgumentException) {
+                throw (IllegalArgumentException) targetException;
+            }
+            throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), targetException);
+        }
+    }
 }
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java
index 59ae4e2f0e5..13e95a58463 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java
@@ -17,7 +17,9 @@
 package com.mongodb.client.unified;
 
 import com.mongodb.client.gridfs.GridFSBucket;
+import com.mongodb.client.gridfs.GridFSFindIterable;
 import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
+import com.mongodb.client.gridfs.model.GridFSFile;
 import com.mongodb.client.gridfs.model.GridFSUploadOptions;
 import com.mongodb.internal.HexUtils;
 import org.bson.BsonDocument;
@@ -32,25 +34,61 @@
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.util.ArrayList;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import static java.util.Objects.requireNonNull;
 
-final class UnifiedGridFSHelper {
+final class UnifiedGridFSHelper extends UnifiedHelper{
     private final Entities entities;
 
     UnifiedGridFSHelper(final Entities entities) {
         this.entities = entities;
     }
 
+    public OperationResult executeFind(final BsonDocument operation) {
+        GridFSFindIterable iterable = createGridFSFindIterable(operation);
+        try {
+            ArrayList<GridFSFile> target = new ArrayList<>();
+            iterable.into(target);
+
+            if (target.isEmpty()) {
+                return OperationResult.NONE;
+            }
+
+            throw new UnsupportedOperationException("expectResult is not implemented for Unified GridFS tests. "
+                    + "Unexpected result: " + target);
+        } catch (Exception e) {
+            return OperationResult.of(e);
+        }
+    }
+
+    public OperationResult executeRename(final BsonDocument operation) {
+        GridFSBucket bucket = getGridFsBucket(operation);
+        BsonDocument arguments = operation.getDocument("arguments");
+        BsonValue id = arguments.get("id");
+        String fileName = arguments.get("newFilename").asString().getValue();
+
+        requireNonNull(id);
+        requireNonNull(fileName);
+
+        try {
+            bucket.rename(id, fileName);
+            return OperationResult.NONE;
+        } catch (Exception e) {
+            return OperationResult.of(e);
+        }
+    }
+
     OperationResult executeDelete(final BsonDocument operation) {
-        GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue());
+        GridFSBucket bucket = getGridFsBucket(operation);
 
         BsonDocument arguments = operation.getDocument("arguments");
         BsonValue id = arguments.get("id");
 
         if (arguments.size() > 1) {
-            throw new UnsupportedOperationException("Unexpected arguments");
+            throw new UnsupportedOperationException("Unexpected arguments " + arguments);
         }
 
         requireNonNull(id);
@@ -63,14 +101,29 @@ OperationResult executeDelete(final BsonDocument operation) {
         }
     }
 
+    public OperationResult executeDrop(final BsonDocument operation) {
+        GridFSBucket bucket = getGridFsBucket(operation);
+        BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+        if (!arguments.isEmpty()) {
+            throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments"));
+        }
+
+        try {
+            bucket.drop();
+            return OperationResult.NONE;
+        } catch (Exception e) {
+            return OperationResult.of(e);
+        }
+    }
+
     public OperationResult executeDownload(final BsonDocument operation) {
-        GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue());
+        GridFSBucket bucket = getGridFsBucket(operation);
 
         BsonDocument arguments = operation.getDocument("arguments");
         BsonValue id = arguments.get("id");
 
         if (arguments.size() > 1) {
-            throw new UnsupportedOperationException("Unexpected arguments");
+            throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments"));
         }
 
         requireNonNull(id);
@@ -119,7 +172,7 @@ private GridFSDownloadOptions getDownloadOptions(final BsonDocument arguments) {
     }
 
     public OperationResult executeUpload(final BsonDocument operation) {
-        GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue());
+        GridFSBucket bucket = getGridFsBucket(operation);
 
         BsonDocument arguments = operation.getDocument("arguments");
         String filename = null;
@@ -165,4 +218,46 @@ public OperationResult executeUpload(final BsonDocument operation) {
    /**
     * Decodes the given {@link BsonDocument} into a {@link Document} using a fresh
     * {@code DocumentCodec} with default decoder settings.
     */
    Document asDocument(final BsonDocument bsonDocument) {
        return new DocumentCodec().decode(new BsonDocumentReader(bsonDocument), DecoderContext.builder().build());
    }
+
+    private GridFSBucket getGridFsBucket(final BsonDocument operation) {
+        GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue());
+        Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments", new BsonDocument()));
+        if (timeoutMS != null) {
+            bucket = bucket.withTimeout(timeoutMS, TimeUnit.MILLISECONDS);
+        }
+        return bucket;
+    }
+
+    private GridFSFindIterable createGridFSFindIterable(final BsonDocument operation) {
+        GridFSBucket bucket = getGridFsBucket(operation);
+
+        BsonDocument arguments = operation.getDocument("arguments");
+        BsonDocument filter = arguments.getDocument("filter");
+        GridFSFindIterable iterable = bucket.find(filter);
+        for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) {
+            switch (cur.getKey()) {
+                case "session":
+                case "filter":
+                    break;
+                case "sort":
+                    iterable.sort(cur.getValue().asDocument());
+                    break;
+                case "batchSize":
+                    iterable.batchSize(cur.getValue().asInt32().intValue());
+                    break;
+                case "maxTimeMS":
+                    iterable.maxTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS);
+                    break;
+                case "skip":
+                    iterable.skip(cur.getValue().asInt32().intValue());
+                    break;
+                case "limit":
+                    iterable.limit(cur.getValue().asInt32().intValue());
+                    break;
+                default:
+                    throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+            }
+        }
+        return iterable;
+    }
 }
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java
new file mode 100644
index 00000000000..027ccf92fb5
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.unified;
+
+import org.bson.BsonDocument;
+
+abstract class UnifiedHelper {
+
+    static Long getAndRemoveTimeoutMS(final BsonDocument arguments) {
+        Long timeoutMS = null;
+        if (arguments.containsKey("timeoutMS")) {
+            timeoutMS = arguments.getNumber("timeoutMS").longValue();
+            arguments.remove("timeoutMS");
+        }
+        return timeoutMS;
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java
index e88abd6669f..ae7ad39a2f5 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java
@@ -146,10 +146,6 @@ LogMatcher getLogMatcher() {
     protected UnifiedTest() {
     }
 
-    protected void ignoreExtraEvents() {
-        ignoreExtraEvents = true;
-    }
-
     public Entities getEntities() {
         return entities;
     }
@@ -380,6 +376,7 @@ private void assertOperation(final UnifiedTestContext context, final BsonDocumen
     private static void assertOperationResult(final UnifiedTestContext context, final BsonDocument operation, final int operationIndex,
             final OperationResult result) {
         context.getAssertionContext().push(ContextElement.ofCompletedOperation(operation, result, operationIndex));
+
         if (!operation.getBoolean("ignoreResultAndError", BsonBoolean.FALSE).getValue()) {
             if (operation.containsKey("expectResult")) {
                 assertNull(result.getException(),
@@ -400,6 +397,7 @@ private static void assertOperationResult(final UnifiedTestContext context, fina
     private OperationResult executeOperation(final UnifiedTestContext context, final BsonDocument operation, final int operationNum) {
         context.getAssertionContext().push(ContextElement.ofStartedOperation(operation, operationNum));
         String name = operation.getString("name").getValue();
+        String object = operation.getString("object").getValue();
         try {
             switch (name) {
                 case "createEntities":
@@ -469,6 +467,9 @@ private OperationResult executeOperation(final UnifiedTestContext context, final
                 case "aggregate":
                     return crudHelper.executeAggregate(operation);
                 case "find":
+                    if ("bucket".equals(object)){
+                        return gridFSHelper.executeFind(operation);
+                    }
                     return crudHelper.executeFind(operation);
                 case "findOne":
                     return crudHelper.executeFindOne(operation);
@@ -505,6 +506,9 @@ private OperationResult executeOperation(final UnifiedTestContext context, final
                 case "modifyCollection":
                     return crudHelper.executeModifyCollection(operation);
                 case "rename":
+                    if ("bucket".equals(object)){
+                        return gridFSHelper.executeRename(operation);
+                    }
                     return crudHelper.executeRenameCollection(operation);
                 case "createSearchIndex":
                     return crudHelper.executeCreateSearchIndex(operation);
@@ -520,6 +524,8 @@ private OperationResult executeOperation(final UnifiedTestContext context, final
                     return crudHelper.executeCreateIndex(operation);
                 case "dropIndex":
                     return crudHelper.executeDropIndex(operation);
+                case "dropIndexes":
+                    return crudHelper.executeDropIndexes(operation);
                 case "startTransaction":
                     return crudHelper.executeStartTransaction(operation);
                 case "commitTransaction":
@@ -536,8 +542,12 @@ private OperationResult executeOperation(final UnifiedTestContext context, final
                     return crudHelper.close(operation);
                 case "iterateUntilDocumentOrError":
                     return crudHelper.executeIterateUntilDocumentOrError(operation);
+                case "iterateOnce":
+                    return crudHelper.executeIterateOnce(operation);
                 case "delete":
                     return gridFSHelper.executeDelete(operation);
+                case "drop":
+                    return gridFSHelper.executeDrop(operation);
                 case "download":
                     return gridFSHelper.executeDownload(operation);
                 case "downloadByName":
@@ -910,7 +920,7 @@ private OperationResult executeAssertLsidOnLastTwoCommands(final BsonDocument op
                 operation.getDocument("arguments").getString("client").getValue());
         List<CommandEvent> events = lastTwoCommandEvents(listener);
         String eventsJson = listener.getCommandStartedEvents().stream()
-                .map(e -> ((CommandStartedEvent) e).getCommand().toJson())
+                .map(e -> e.getCommand().toJson())
                 .collect(Collectors.joining(", "));
         BsonDocument expected = ((CommandStartedEvent) events.get(0)).getCommand().getDocument("lsid");
         BsonDocument actual = ((CommandStartedEvent) events.get(1)).getCommand().getDocument("lsid");
@@ -976,9 +986,9 @@ private boolean indexExists(final BsonDocument operation) {
     }
 
     private List<CommandEvent> lastTwoCommandEvents(final TestCommandListener listener) {
-        List<CommandEvent> events = listener.getCommandStartedEvents();
+        List<CommandStartedEvent> events = listener.getCommandStartedEvents();
         assertTrue(events.size() >= 2);
-        return events.subList(events.size() - 2, events.size());
+        return new ArrayList<>(events.subList(events.size() - 2, events.size()));
     }
 
     private BsonDocument addInitialDataAndGetClusterTime() {
@@ -988,7 +998,7 @@ private BsonDocument addInitialDataAndGetClusterTime() {
                     new MongoNamespace(curDataSet.getString("databaseName").getValue(),
                             curDataSet.getString("collectionName").getValue()));
 
-            helper.create(WriteConcern.MAJORITY);
+            helper.create(WriteConcern.MAJORITY, curDataSet.getDocument("createOptions", new BsonDocument()));
 
             BsonArray documentsArray = curDataSet.getArray("documents", new BsonArray());
             if (!documentsArray.isEmpty()) {
@@ -998,4 +1008,12 @@ private BsonDocument addInitialDataAndGetClusterTime() {
         }
         return getCurrentClusterTime();
     }
+
    /**
     * Sets whether events beyond those listed in the test's expectations should be
     * ignored when matching recorded events.
     */
    protected void ignoreExtraCommandEvents(final boolean ignoreExtraEvents) {
        this.ignoreExtraEvents = ignoreExtraEvents;
    }
+
    /**
     * Convenience toggle: ignore any recorded events beyond those listed in the
     * test's expectations.
     */
    protected void ignoreExtraEvents() {
        this.ignoreExtraEvents = true;
    }
 }
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java
index fb8b0520d26..899769d2d9f 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java
@@ -122,6 +122,10 @@ private void assertValuesMatch(final BsonValue initialExpected, @Nullable final
                                 actualValue = BsonDocument.parse(actualValue.asString().getValue());
                                 value = value.asDocument().getDocument("$$matchAsDocument");
                                 break;
+                            case "$$lte":
+                                value = value.asDocument().getNumber("$$lte");
+                                assertTrue(actualValue.asNumber().longValue() <= value.asNumber().longValue());
+                                return;
                             default:
                                 throw new UnsupportedOperationException("Unsupported special operator: " + value.asDocument().getFirstKey());
                         }
diff --git a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
index 80eced15c60..a947effd36f 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
@@ -32,6 +32,7 @@ import com.mongodb.connection.ClusterType
 import com.mongodb.connection.ServerConnectionState
 import com.mongodb.connection.ServerDescription
 import com.mongodb.connection.ServerType
+import com.mongodb.internal.TimeoutSettings
 import com.mongodb.internal.client.model.changestream.ChangeStreamLevel
 import com.mongodb.internal.connection.Cluster
 import org.bson.BsonDocument
@@ -46,6 +47,7 @@ import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.ReadPreference.secondary
 import static com.mongodb.client.internal.TestHelper.execute
+import static java.util.concurrent.TimeUnit.SECONDS
 import static org.bson.UuidRepresentation.C_SHARP_LEGACY
 import static org.bson.UuidRepresentation.UNSPECIFIED
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders
@@ -54,7 +56,8 @@ import static spock.util.matcher.HamcrestSupport.expect
 
 class MongoClientSpecification extends Specification {
 
-    private static CodecRegistry codecRegistry = fromProviders(new ValueCodecProvider())
+    private static final CodecRegistry CODEC_REGISTRY = fromProviders(new ValueCodecProvider())
+    private static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(120))
 
     def 'should pass the correct settings to getDatabase'() {
         given:
@@ -63,7 +66,7 @@ class MongoClientSpecification extends Specification {
                 .writeConcern(WriteConcern.MAJORITY)
                 .readConcern(ReadConcern.MAJORITY)
                 .retryWrites(true)
-                .codecRegistry(codecRegistry)
+                .codecRegistry(CODEC_REGISTRY)
                 .build()
         def client = new MongoClientImpl(Stub(Cluster), null, settings, new TestOperationExecutor([]))
 
@@ -74,8 +77,9 @@ class MongoClientSpecification extends Specification {
         expect database, isTheSameAs(expectedDatabase)
 
         where:
-        expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(codecRegistry, UNSPECIFIED), secondary(),
-                WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, new TestOperationExecutor([]))
+        expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(CODEC_REGISTRY, UNSPECIFIED), secondary(),
+                WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null,
+                TIMEOUT_SETTINGS, new TestOperationExecutor([]))
     }
 
     def 'should use ListDatabasesIterableImpl correctly'() {
@@ -90,14 +94,14 @@ class MongoClientSpecification extends Specification {
 
         then:
         expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, Document,
-                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true))
+                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS))
 
         when:
         listDatabasesIterable = execute(listDatabasesMethod, session, BsonDocument)
 
         then:
         expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument,
-                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true))
+                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS))
 
         when:
         def listDatabaseNamesIterable = execute(listDatabasesNamesMethod, session) as MongoIterable<String>
@@ -105,7 +109,8 @@ class MongoClientSpecification extends Specification {
         then:
         // listDatabaseNamesIterable is an instance of a MappingIterable, so have to get the mapped iterable inside it
         expect listDatabaseNamesIterable.getMapped(), isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument,
-                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true).nameOnly(true))
+                withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS)
+                .nameOnly(true))
 
         cleanup:
         client?.close()
@@ -134,7 +139,7 @@ class MongoClientSpecification extends Specification {
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace,
                 withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED),
-                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true),
+                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS),
                 ['codec'])
 
         when:
@@ -144,7 +149,7 @@ class MongoClientSpecification extends Specification {
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace,
                 withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED),
                 readPreference, readConcern, executor, [new Document('$match', 1)], Document, ChangeStreamLevel.CLIENT,
-                true), ['codec'])
+                true, TIMEOUT_SETTINGS), ['codec'])
 
         when:
         changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument)
@@ -153,7 +158,7 @@ class MongoClientSpecification extends Specification {
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace,
                 withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED),
                 readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument,
-                ChangeStreamLevel.CLIENT, true), ['codec'])
+                ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), ['codec'])
 
         where:
         session << [null, Stub(ClientSession)]
diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy
index 7ae3e568bf4..cb34236c627 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy
@@ -33,6 +33,7 @@ import com.mongodb.client.internal.OperationExecutor
 import com.mongodb.client.internal.TestOperationExecutor
 import com.mongodb.client.result.DeleteResult
 import com.mongodb.client.result.UpdateResult
+import com.mongodb.internal.TimeoutSettings
 import com.mongodb.internal.operation.BatchCursor
 import com.mongodb.internal.operation.FindOperation
 import org.bson.BsonBinary
@@ -46,6 +47,9 @@ import org.bson.types.ObjectId
 import spock.lang.Specification
 import spock.lang.Unroll
 
+import java.util.concurrent.TimeUnit
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.ReadPreference.secondary
@@ -61,7 +65,7 @@ class GridFSBucketSpecification extends Specification {
     def database = databaseWithExecutor(Stub(OperationExecutor))
     def databaseWithExecutor(OperationExecutor executor) {
         new MongoDatabaseImpl('test', registry, primary(), WriteConcern.ACKNOWLEDGED, false, false, readConcern,
-                JAVA_LEGACY, null, executor)
+                JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
     }
 
     def 'should return the correct bucket name'() {
@@ -156,7 +160,9 @@ class GridFSBucketSpecification extends Specification {
         given:
         def defaultChunkSizeBytes = 255 * 1024
         def database = new MongoDatabaseImpl('test', fromProviders(new DocumentCodecProvider()), secondary(), WriteConcern.ACKNOWLEDGED,
-                false, false, readConcern, JAVA_LEGACY, null, new TestOperationExecutor([]))
+                false, false, readConcern, JAVA_LEGACY, null,
+                new TimeoutSettings(0, 0, 0, null, 0),
+                new TestOperationExecutor([]))
 
         when:
         def gridFSBucket = new GridFSBucketImpl(database)
@@ -172,6 +178,9 @@ class GridFSBucketSpecification extends Specification {
         given:
         def filesCollection = Stub(MongoCollection)
         def chunksCollection = Stub(MongoCollection)
+        filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null
+        chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null
+
         def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection)
 
         when:
@@ -184,7 +193,7 @@ class GridFSBucketSpecification extends Specification {
 
         then:
         expect stream, isTheSameAs(new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, stream.getId(), 'filename',
-                255, null), ['closeLock'])
+                255, null, null), ['closeLock'])
 
         where:
         clientSession << [null, Stub(ClientSession)]
@@ -291,7 +300,9 @@ class GridFSBucketSpecification extends Specification {
         def fileInfo = new GridFSFile(fileId, 'File 1', 10, 255, new Date(), new Document())
         def findIterable =  Mock(FindIterable)
         def filesCollection = Mock(MongoCollection)
+        filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null
         def chunksCollection = Stub(MongoCollection)
+        chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null
         def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection)
 
         when:
@@ -312,7 +323,8 @@ class GridFSBucketSpecification extends Specification {
         1 * findIterable.first() >> fileInfo
 
         then:
-        expect stream, isTheSameAs(new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection), ['closeLock', 'cursorLock'])
+        expect stream, isTheSameAs(new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection,
+                null), ['closeLock', 'cursorLock'])
 
 
         where:
@@ -522,7 +534,9 @@ class GridFSBucketSpecification extends Specification {
         def fileInfo = new GridFSFile(bsonFileId, filename, 10, 255, new Date(), new Document())
         def findIterable =  Mock(FindIterable)
         def filesCollection = Mock(MongoCollection)
+        filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null
         def chunksCollection = Stub(MongoCollection)
+        chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null
         def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection)
 
         when:
@@ -540,7 +554,7 @@ class GridFSBucketSpecification extends Specification {
         1 * findIterable.first() >> fileInfo
 
         then:
-        expect stream, isTheSameAs(new GridFSDownloadStreamImpl(null, fileInfo, chunksCollection), ['closeLock', 'cursorLock'])
+        expect stream, isTheSameAs(new GridFSDownloadStreamImpl(null, fileInfo, chunksCollection, null), ['closeLock', 'cursorLock'])
 
         where:
         version | skip | sortOrder
@@ -600,8 +614,8 @@ class GridFSBucketSpecification extends Specification {
 
         then:
         executor.getReadPreference() == secondary()
-        expect executor.getReadOperation(), isTheSameAs(new FindOperation<GridFSFile>(new MongoNamespace('test.fs.files'), decoder)
-                .filter(filter))
+        expect executor.getReadOperation(), isTheSameAs(
+                new FindOperation<GridFSFile>(new MongoNamespace('test.fs.files'), decoder).filter(filter))
     }
 
     def 'should throw an exception if file not found when opening by name'() {
diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy
index d8b109b1f4b..0064cc9aad8 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy
@@ -16,6 +16,7 @@
 
 package com.mongodb.client.gridfs
 
+import com.mongodb.ClusterFixture
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.WriteConcern
@@ -35,7 +36,7 @@ class GridFSBucketsSpecification extends Specification {
     def 'should create a GridFSBucket with default bucket name'() {
         given:
         def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern,
-                JAVA_LEGACY, null, Stub(OperationExecutor))
+                JAVA_LEGACY, null, ClusterFixture.TIMEOUT_SETTINGS, Stub(OperationExecutor))
 
         when:
         def gridFSBucket = GridFSBuckets.create(database)
@@ -48,7 +49,7 @@ class GridFSBucketsSpecification extends Specification {
     def 'should create a GridFSBucket with custom bucket name'() {
         given:
         def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern,
-                JAVA_LEGACY, null, Stub(OperationExecutor))
+                JAVA_LEGACY, null, ClusterFixture.TIMEOUT_SETTINGS, Stub(OperationExecutor))
         def customName = 'custom'
 
         when:
diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy
index d39ee094230..59bf12ec3a4 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy
@@ -35,7 +35,7 @@ class GridFSDownloadStreamSpecification extends Specification {
 
     def 'should return the file info'() {
         when:
-        def downloadStream = new GridFSDownloadStreamImpl(null, fileInfo, Stub(MongoCollection))
+        def downloadStream = new GridFSDownloadStreamImpl(null, fileInfo, Stub(MongoCollection), null)
 
         then:
         downloadStream.getGridFSFile() == fileInfo
@@ -59,7 +59,7 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         then:
         downloadStream.available() == 0
@@ -132,7 +132,8 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection).batchSize(1)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection,
+                null).batchSize(1)
 
         then:
         downloadStream.available() == 0
@@ -215,7 +216,7 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         when:
         def skipResult = downloadStream.skip(15)
@@ -293,7 +294,7 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         when:
         def readByte = new byte[10]
@@ -362,7 +363,7 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         when:
         downloadStream.mark()
@@ -439,7 +440,7 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         when:
         def readByte = new byte[25]
@@ -496,7 +497,7 @@ class GridFSDownloadStreamSpecification extends Specification {
 
     def 'should not throw an exception when trying to mark post close'() {
         given:
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection))
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null)
         downloadStream.close()
 
         when:
@@ -517,7 +518,7 @@ class GridFSDownloadStreamSpecification extends Specification {
 
     def 'should handle negative skip value correctly '() {
         given:
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection))
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null)
 
         when:
         def result = downloadStream.skip(-1)
@@ -532,7 +533,7 @@ class GridFSDownloadStreamSpecification extends Specification {
     def 'should handle skip that is larger or equal to the file length'() {
         given:
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         when:
         def result = downloadStream.skip(skipValue)
@@ -553,7 +554,7 @@ class GridFSDownloadStreamSpecification extends Specification {
 
     def 'should throw if trying to pass negative batchSize'() {
         given:
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection))
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null)
 
         when:
         downloadStream.batchSize(0)
@@ -577,7 +578,7 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         when:
         downloadStream.read()
@@ -609,7 +610,7 @@ class GridFSDownloadStreamSpecification extends Specification {
         def mongoCursor = Mock(MongoCursor)
         def findIterable = Mock(FindIterable)
         def chunksCollection = Mock(MongoCollection)
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection)
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null)
 
         when:
         downloadStream.read()
@@ -635,7 +636,7 @@ class GridFSDownloadStreamSpecification extends Specification {
 
     def 'should throw an exception when trying to action post close'() {
         given:
-        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection))
+        def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null)
         downloadStream.close()
 
         when:
diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy
index e0686420665..632e59a16d0 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy
@@ -38,6 +38,7 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -56,7 +57,7 @@ class GridFSFindIterableSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null])
         def underlying = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, executor,
-                new Document())
+                new Document(), true, TIMEOUT_SETTINGS)
         def findIterable = new GridFSFindIterableImpl(underlying)
 
         when: 'default input should be as expected'
@@ -73,7 +74,7 @@ class GridFSFindIterableSpecification extends Specification {
         when: 'overriding initial options'
         findIterable.filter(new Document('filter', 2))
                 .sort(new Document('sort', 2))
-                .maxTime(999, MILLISECONDS)
+                .maxTime(100, MILLISECONDS)
                 .batchSize(99)
                 .limit(99)
                 .skip(9)
@@ -87,7 +88,6 @@ class GridFSFindIterableSpecification extends Specification {
         expect operation, isTheSameAs(new FindOperation<GridFSFile>(namespace, gridFSFileCodec)
                 .filter(new BsonDocument('filter', new BsonInt32(2)))
                 .sort(new BsonDocument('sort', new BsonInt32(2)))
-                .maxTime(999, MILLISECONDS)
                 .batchSize(99)
                 .limit(99)
                 .skip(9)
@@ -101,7 +101,7 @@ class GridFSFindIterableSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null])
         def findIterable = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern,
-                executor, new Document('filter', 1))
+                executor, new Document('filter', 1), true, TIMEOUT_SETTINGS)
 
         when:
         findIterable.filter(new Document('filter', 1))
@@ -148,7 +148,7 @@ class GridFSFindIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
         def underlying = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, executor,
-                new Document())
+                new Document(), true, TIMEOUT_SETTINGS)
         def mongoIterable = new GridFSFindIterableImpl(underlying)
 
         when:
diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy
index e3df2c225e1..c81f947abf0 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy
@@ -35,7 +35,7 @@ class GridFSUploadStreamSpecification extends Specification {
     def 'should return the file id'() {
         when:
         def uploadStream = new GridFSUploadStreamImpl(null, Stub(MongoCollection), Stub(MongoCollection), fileId, filename, 255
-                , metadata)
+                , metadata, null)
         then:
         uploadStream.getId() == fileId
     }
@@ -45,7 +45,7 @@ class GridFSUploadStreamSpecification extends Specification {
         def filesCollection = Mock(MongoCollection)
         def chunksCollection = Mock(MongoCollection)
         def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 2
-                , metadata)
+                , metadata, null)
         when:
         uploadStream.write(1)
 
@@ -71,7 +71,7 @@ class GridFSUploadStreamSpecification extends Specification {
         def filesCollection = Mock(MongoCollection)
         def chunksCollection = Mock(MongoCollection)
         def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255
-                , null)
+                , null, null)
 
         when:
         uploadStream.write('file content ' as byte[])
@@ -101,7 +101,8 @@ class GridFSUploadStreamSpecification extends Specification {
         def chunksCollection = Mock(MongoCollection)
         def content = 'file content ' as byte[]
         def metadata = new Document('contentType', 'text/txt')
-        def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255, metadata)
+        def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255,
+                metadata, null)
         def filesId = fileId
 
         when:
@@ -159,7 +160,7 @@ class GridFSUploadStreamSpecification extends Specification {
         def filesCollection = Mock(MongoCollection)
         def chunksCollection = Mock(MongoCollection)
         def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255
-                , metadata)
+                , metadata, null)
         when:
         uploadStream.close()
 
@@ -179,7 +180,7 @@ class GridFSUploadStreamSpecification extends Specification {
         given:
         def chunksCollection = Mock(MongoCollection)
         def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), chunksCollection, fileId, filename, 255
-                , metadata)
+                , metadata, null)
 
         when:
         uploadStream.write('file content ' as byte[])
@@ -199,7 +200,7 @@ class GridFSUploadStreamSpecification extends Specification {
     def 'should close the stream on abort'() {
         given:
         def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), Stub(MongoCollection), fileId, filename, 255
-                , metadata)
+                , metadata, null)
         uploadStream.write('file content ' as byte[])
         uploadStream.abort()
 
@@ -217,7 +218,7 @@ class GridFSUploadStreamSpecification extends Specification {
         given:
         def chunksCollection = Mock(MongoCollection)
         def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), chunksCollection, fileId, filename, 255
-                , metadata)
+                , metadata, null)
 
         when:
         uploadStream.write('file content ' as byte[])
@@ -235,7 +236,7 @@ class GridFSUploadStreamSpecification extends Specification {
         def filesCollection = Mock(MongoCollection)
         def chunksCollection = Mock(MongoCollection)
         def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255
-                , metadata)
+                , metadata, null)
         when:
         uploadStream.close()
         uploadStream.write(1)
@@ -253,7 +254,7 @@ class GridFSUploadStreamSpecification extends Specification {
         def filesCollection = Mock(MongoCollection)
         def chunksCollection = Mock(MongoCollection)
         def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255
-                , metadata)
+                , metadata, null)
         when:
         uploadStream.getObjectId()
 
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy
index 64bbae0ad1f..733ee4c57df 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy
@@ -41,6 +41,7 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -62,7 +63,7 @@ class AggregateIterableSpecification extends Specification {
         def pipeline = [new Document('$match', 1)]
         def aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
                 readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION,
-                true)
+                true, TIMEOUT_SETTINGS)
 
         when: 'default input should be as expected'
         aggregationIterable.iterator()
@@ -78,8 +79,8 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'overriding initial options'
         aggregationIterable
-                .maxAwaitTime(99, MILLISECONDS)
-                .maxTime(999, MILLISECONDS)
+                .maxAwaitTime(1001, MILLISECONDS)
+                .maxTime(101, MILLISECONDS)
                 .collation(collation)
                 .hint(new Document('a', 1))
                 .comment('this is a comment')
@@ -93,13 +94,11 @@ class AggregateIterableSpecification extends Specification {
                 .retryReads(true)
                 .collation(collation)
                 .hint(new BsonDocument('a', new BsonInt32(1)))
-                .comment(new BsonString('this is a comment'))
-                .maxAwaitTime(99, MILLISECONDS)
-                .maxTime(999, MILLISECONDS))
+                .comment(new BsonString('this is a comment')))
 
         when: 'both hint and hint string are set'
         aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
 
         aggregationIterable
                 .hint(new Document('a', 1))
@@ -123,9 +122,8 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'aggregation includes $out'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false)
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .batchSize(99)
-                .maxTime(999, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new Document('a', 1))
@@ -138,7 +136,6 @@ class AggregateIterableSpecification extends Specification {
         expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace,
                 [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))],
                 readConcern, writeConcern, AggregationLevel.COLLECTION)
-                .maxTime(999, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new BsonDocument('a', new BsonInt32(1)))
@@ -152,14 +149,12 @@ class AggregateIterableSpecification extends Specification {
         operation.getNamespace() == collectionNamespace
         operation.getBatchSize() == 99
         operation.getCollation() == collation
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
-        operation.getMaxTime(MILLISECONDS) == 0
 
         when: 'aggregation includes $out and is at the database level'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.DATABASE, false)
+                pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS)
                 .batchSize(99)
-                .maxTime(999, MILLISECONDS)
+                .maxTime(100, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new Document('a', 1))
@@ -173,7 +168,6 @@ class AggregateIterableSpecification extends Specification {
                 [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))],
                 readConcern, writeConcern,
                 AggregationLevel.DATABASE)
-                .maxTime(999, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new BsonDocument('a', new BsonInt32(1)))
@@ -187,13 +181,11 @@ class AggregateIterableSpecification extends Specification {
         operation.getNamespace() == collectionNamespace
         operation.getBatchSize() == 99
         operation.getCollation() == collation
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
-        operation.getMaxTime(MILLISECONDS) == 0
         operation.isAllowDiskUse() == null
 
         when: 'toCollection should work as expected'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false)
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new Document('a', 1))
@@ -220,7 +212,7 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'aggregation includes $out and hint string'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false)
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .hintString('x_1').iterator()
 
         def operation = executor.getReadOperation() as AggregateToCollectionOperation
@@ -234,7 +226,7 @@ class AggregateIterableSpecification extends Specification {
         when: 'aggregation includes $out and hint and hint string'
         executor = new TestOperationExecutor([null, null, null, null, null])
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false)
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .hint(new BsonDocument('x', new BsonInt32(1)))
                 .hintString('x_1').iterator()
 
@@ -258,9 +250,8 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'aggregation includes $merge'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false)
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .batchSize(99)
-                .maxTime(999, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new Document('a', 1))
@@ -274,7 +265,6 @@ class AggregateIterableSpecification extends Specification {
                  new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))],
                 readConcern, writeConcern,
                 AggregationLevel.COLLECTION)
-                .maxTime(999, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new BsonDocument('a', new BsonInt32(1)))
@@ -288,14 +278,12 @@ class AggregateIterableSpecification extends Specification {
         operation.getNamespace() == collectionNamespace
         operation.getBatchSize() == 99
         operation.getCollation() == collation
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
-        operation.getMaxTime(MILLISECONDS) == 0
 
         when: 'aggregation includes $merge into a different database'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipelineWithIntoDocument, AggregationLevel.COLLECTION, false)
+                pipelineWithIntoDocument, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .batchSize(99)
-                .maxTime(999, MILLISECONDS)
+                .maxTime(100, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new Document('a', 1))
@@ -310,7 +298,6 @@ class AggregateIterableSpecification extends Specification {
                          new BsonDocument('db', new BsonString('db2')).append('coll', new BsonString(collectionName))))],
                 readConcern, writeConcern,
                 AggregationLevel.COLLECTION)
-                .maxTime(999, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new BsonDocument('a', new BsonInt32(1)))
@@ -324,14 +311,12 @@ class AggregateIterableSpecification extends Specification {
         operation.getNamespace() == new MongoNamespace('db2', collectionName)
         operation.getBatchSize() == 99
         operation.getCollation() == collation
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
-        operation.getMaxTime(MILLISECONDS) == 0
 
         when: 'aggregation includes $merge and is at the database level'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.DATABASE, false)
+                pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS)
                 .batchSize(99)
-                .maxTime(999, MILLISECONDS)
+                .maxTime(100, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new Document('a', 1))
@@ -345,7 +330,6 @@ class AggregateIterableSpecification extends Specification {
                  new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))],
                 readConcern, writeConcern,
                 AggregationLevel.DATABASE)
-                .maxTime(999, MILLISECONDS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new BsonDocument('a', new BsonInt32(1)))
@@ -359,12 +343,10 @@ class AggregateIterableSpecification extends Specification {
         operation.getNamespace() == collectionNamespace
         operation.getBatchSize() == 99
         operation.getCollation() == collation
-        operation.getMaxAwaitTime(MILLISECONDS) == 0
-        operation.getMaxTime(MILLISECONDS) == 0
 
         when: 'toCollection should work as expected'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false)
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .allowDiskUse(true)
                 .collation(collation)
                 .hint(new Document('a', 1))
@@ -393,14 +375,14 @@ class AggregateIterableSpecification extends Specification {
 
         when:
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false)
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
                 .iterator()
 
         def operation = executor.getReadOperation() as AggregateToCollectionOperation
 
         then:
-        expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, pipeline, readConcern, writeConcern,
-                AggregationLevel.COLLECTION))
+        expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, pipeline, readConcern,
+                writeConcern, AggregationLevel.COLLECTION))
 
         when:
         operation = executor.getReadOperation() as FindOperation<Document>
@@ -436,7 +418,7 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'aggregation includes $out'
         def aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
 
         aggregateIterable.toCollection()
         def operation = executor.getReadOperation() as AggregateToCollectionOperation
@@ -455,7 +437,7 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'aggregation includes $out and is at the database level'
         aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, false)
+                readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS)
         aggregateIterable.toCollection()
 
         operation = executor.getReadOperation() as AggregateToCollectionOperation
@@ -474,7 +456,7 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'toCollection should work as expected'
         aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
         aggregateIterable.toCollection()
 
         operation = executor.getReadOperation() as AggregateToCollectionOperation
@@ -492,7 +474,7 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'aggregation includes $out with namespace'
         aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, outWithDBpipeline, AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, outWithDBpipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
         aggregateIterable.toCollection()
 
         operation = executor.getReadOperation() as AggregateToCollectionOperation
@@ -519,7 +501,7 @@ class AggregateIterableSpecification extends Specification {
         def executor = new TestOperationExecutor([batchCursor, batchCursor])
         def pipeline = [new Document('$match', 1)]
         def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
 
         when:
         aggregationIterable.first()
@@ -545,7 +527,7 @@ class AggregateIterableSpecification extends Specification {
         def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null])
         def pipeline = [new Document('$match', 1), new Document('$out', 'collName')]
         def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
 
         when:
         aggregationIterable.first()
@@ -576,7 +558,7 @@ class AggregateIterableSpecification extends Specification {
         def executor = new TestOperationExecutor([new MongoException('failure')])
         def pipeline = [new BsonDocument('$match', new BsonInt32(1))]
         def aggregationIterable = new AggregateIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS)
 
         when: 'The operation fails with an exception'
         aggregationIterable.iterator()
@@ -592,14 +574,14 @@ class AggregateIterableSpecification extends Specification {
 
         when: 'a codec is missing'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                pipeline, AggregationLevel.COLLECTION, false).iterator()
+                pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS).iterator()
 
         then:
         thrown(CodecConfigurationException)
 
         when: 'pipeline contains null'
         new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                [null], AggregationLevel.COLLECTION, false).iterator()
+                [null], AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS).iterator()
 
         then:
         thrown(IllegalArgumentException)
@@ -627,7 +609,8 @@ class AggregateIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
         def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, [new Document('$match', 1)], AggregationLevel.COLLECTION, false)
+                readConcern, writeConcern, executor, [new Document('$match', 1)], AggregationLevel.COLLECTION, false,
+                TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -672,7 +655,7 @@ class AggregateIterableSpecification extends Specification {
         def batchSize = 5
         def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
                 readConcern, writeConcern, Stub(OperationExecutor), [new Document('$match', 1)], AggregationLevel.COLLECTION,
-                false)
+                false, TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy
index 7141db09c43..b66373b221f 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy
@@ -20,7 +20,6 @@ import com.mongodb.Function
 import com.mongodb.MongoException
 import com.mongodb.MongoNamespace
 import com.mongodb.ReadConcern
-import com.mongodb.WriteConcern
 import com.mongodb.client.ClientSession
 import com.mongodb.client.model.Collation
 import com.mongodb.client.model.changestream.ChangeStreamDocument
@@ -43,6 +42,7 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -54,7 +54,6 @@ class ChangeStreamIterableSpecification extends Specification {
     def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()])
     def readPreference = secondary()
     def readConcern = ReadConcern.MAJORITY
-    def writeConcern = WriteConcern.MAJORITY
     def collation = Collation.builder().locale('en').build()
 
     def 'should build the expected ChangeStreamOperation'() {
@@ -62,7 +61,7 @@ class ChangeStreamIterableSpecification extends Specification {
         def executor = new TestOperationExecutor([null, null, null, null, null])
         def pipeline = [new Document('$match', 1)]
         def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern,
-                executor, pipeline, Document, ChangeStreamLevel.COLLECTION, true)
+                executor, pipeline, Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS)
 
         when: 'default input should be as expected'
         changeStreamIterable.iterator()
@@ -72,14 +71,17 @@ class ChangeStreamIterableSpecification extends Specification {
         def readPreference = executor.getReadPreference()
 
         then:
-        expect operation, isTheSameAs(new ChangeStreamOperation<Document>(namespace, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT,
-                [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION).retryReads(true))
+        expect operation, isTheSameAs(new ChangeStreamOperation<Document>(namespace,
+                FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [BsonDocument.parse('{$match: 1}')], codec,
+                ChangeStreamLevel.COLLECTION)
+                .retryReads(true))
         readPreference == secondary()
 
         when: 'overriding initial options'
         def resumeToken = RawBsonDocument.parse('{_id: {a: 1}}')
         def startAtOperationTime = new BsonTimestamp(99)
-        changeStreamIterable.collation(collation).maxAwaitTime(99, MILLISECONDS)
+        changeStreamIterable.collation(collation)
+                .maxAwaitTime(101, MILLISECONDS)
                 .fullDocument(FullDocument.UPDATE_LOOKUP)
                 .fullDocumentBeforeChange(FullDocumentBeforeChange.WHEN_AVAILABLE)
                 .resumeAfter(resumeToken).startAtOperationTime(startAtOperationTime)
@@ -88,12 +90,14 @@ class ChangeStreamIterableSpecification extends Specification {
         operation = executor.getReadOperation() as ChangeStreamOperation<Document>
 
         then: 'should use the overrides'
-        expect operation, isTheSameAs(new ChangeStreamOperation<Document>(namespace, FullDocument.UPDATE_LOOKUP,
-                FullDocumentBeforeChange.WHEN_AVAILABLE,
-                [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION)
+        expect operation, isTheSameAs(new ChangeStreamOperation<Document>(namespace,
+                FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.WHEN_AVAILABLE, [BsonDocument.parse('{$match: 1}')], codec,
+                ChangeStreamLevel.COLLECTION)
                 .retryReads(true)
-                .collation(collation).maxAwaitTime(99, MILLISECONDS)
-                .resumeAfter(resumeToken).startAtOperationTime(startAtOperationTime).startAfter(resumeToken))
+                .collation(collation)
+                .resumeAfter(resumeToken)
+                .startAtOperationTime(startAtOperationTime)
+                .startAfter(resumeToken))
     }
 
     def 'should use ClientSession'() {
@@ -103,7 +107,7 @@ class ChangeStreamIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([batchCursor, batchCursor])
         def changeStreamIterable = new ChangeStreamIterableImpl(clientSession, namespace, codecRegistry, readPreference, readConcern,
-                executor, [], Document, ChangeStreamLevel.COLLECTION, true)
+                executor, [], Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS)
 
         when:
         changeStreamIterable.first()
@@ -127,7 +131,7 @@ class ChangeStreamIterableSpecification extends Specification {
         def executor = new TestOperationExecutor([new MongoException('failure')])
         def pipeline = [new BsonDocument('$match', new BsonInt32(1))]
         def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern,
-                executor, pipeline, BsonDocument, ChangeStreamLevel.COLLECTION, true)
+                executor, pipeline, BsonDocument, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS)
 
         when: 'The operation fails with an exception'
         changeStreamIterable.iterator()
@@ -137,14 +141,14 @@ class ChangeStreamIterableSpecification extends Specification {
 
         when: 'a codec is missing'
         new ChangeStreamIterableImpl(null, namespace, altRegistry, readPreference, readConcern, executor, pipeline, Document,
-                ChangeStreamLevel.COLLECTION, true).iterator()
+                ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).iterator()
 
         then:
         thrown(CodecConfigurationException)
 
         when: 'pipeline contains null'
         new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [null], Document,
-                ChangeStreamLevel.COLLECTION, true).iterator()
+                ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).iterator()
 
         then:
         thrown(IllegalArgumentException)
@@ -159,7 +163,7 @@ class ChangeStreamIterableSpecification extends Specification {
         def executor = new TestOperationExecutor([cursor(cannedResults), cursor(cannedResults), cursor(cannedResults),
                                                   cursor(cannedResults)])
         def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [],
-                Document, ChangeStreamLevel.COLLECTION, true)
+                Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -207,7 +211,7 @@ class ChangeStreamIterableSpecification extends Specification {
         def executor = new TestOperationExecutor([cursor(cannedResults), cursor(cannedResults), cursor(cannedResults),
                                                   cursor(cannedResults)])
         def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [],
-                Document, ChangeStreamLevel.COLLECTION, true).withDocumentClass(RawBsonDocument)
+                Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).withDocumentClass(RawBsonDocument)
 
         when:
         def results = mongoIterable.first()
@@ -251,7 +255,8 @@ class ChangeStreamIterableSpecification extends Specification {
         when:
         def batchSize = 5
         def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern,
-                Stub(OperationExecutor), [BsonDocument.parse('{$match: 1}')], BsonDocument, ChangeStreamLevel.COLLECTION, true)
+                Stub(OperationExecutor), [BsonDocument.parse('{$match: 1}')], BsonDocument, ChangeStreamLevel.COLLECTION, true,
+                TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy
index 329e8e9a8b8..49332bc8ed3 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy
@@ -19,7 +19,6 @@ package com.mongodb.client.internal
 import com.mongodb.ReadConcern
 import com.mongodb.ReadPreference
 import com.mongodb.client.ClientSession
-import com.mongodb.internal.IgnorableRequestContext
 import com.mongodb.internal.binding.ClusterBinding
 import com.mongodb.internal.binding.ConnectionSource
 import com.mongodb.internal.binding.ReadWriteBinding
@@ -27,15 +26,19 @@ import com.mongodb.internal.connection.Cluster
 import com.mongodb.internal.session.ClientSessionContext
 import spock.lang.Specification
 
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
 class ClientSessionBindingSpecification extends Specification {
     def 'should return the session context from the binding'() {
         given:
         def session = Stub(ClientSession)
-        def wrappedBinding = Stub(ClusterBinding)
+        def wrappedBinding = Stub(ClusterBinding) {
+            getOperationContext() >> OPERATION_CONTEXT
+        }
         def binding = new ClientSessionBinding(session, false, wrappedBinding)
 
         when:
-        def context = binding.getSessionContext()
+        def context = binding.getOperationContext().getSessionContext()
 
         then:
         (context as ClientSessionContext).getClientSession() == session
@@ -44,12 +47,14 @@ class ClientSessionBindingSpecification extends Specification {
     def 'should return the session context from the connection source'() {
         given:
         def session = Stub(ClientSession)
-        def wrappedBinding = Mock(ClusterBinding)
+        def wrappedBinding = Mock(ClusterBinding) {
+            getOperationContext() >> OPERATION_CONTEXT
+        }
         def binding = new ClientSessionBinding(session, false, wrappedBinding)
 
         when:
         def readConnectionSource = binding.getReadConnectionSource()
-        def context = readConnectionSource.getSessionContext()
+        def context = readConnectionSource.getOperationContext().getSessionContext()
 
         then:
         (context as ClientSessionContext).getClientSession() == session
@@ -59,7 +64,7 @@ class ClientSessionBindingSpecification extends Specification {
 
         when:
         def writeConnectionSource = binding.getWriteConnectionSource()
-        context = writeConnectionSource.getSessionContext()
+        context = writeConnectionSource.getOperationContext().getSessionContext()
 
         then:
         (context as ClientSessionContext).getClientSession() == session
@@ -144,7 +149,7 @@ class ClientSessionBindingSpecification extends Specification {
         def binding = new ClientSessionBinding(session, ownsSession, wrappedBinding)
 
         then:
-        binding.getSessionContext().isImplicitSession() == ownsSession
+        binding.getOperationContext().getSessionContext().isImplicitSession() == ownsSession
 
         where:
         ownsSession << [true, false]
@@ -152,6 +157,6 @@ class ClientSessionBindingSpecification extends Specification {
 
     private ReadWriteBinding createStubBinding() {
         def cluster = Stub(Cluster)
-        new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, null, IgnorableRequestContext.INSTANCE)
+        new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)
     }
 }
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy
index 990c39a4634..18a13195d00 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy
@@ -16,20 +16,19 @@
 
 package com.mongodb.client.internal
 
+import com.mongodb.ClusterFixture
 import com.mongodb.ReadPreference
 import com.mongodb.ServerAddress
 import com.mongodb.connection.ClusterId
 import com.mongodb.connection.ConnectionDescription
 import com.mongodb.connection.ConnectionId
 import com.mongodb.connection.ServerId
-import com.mongodb.internal.IgnorableRequestContext
-import com.mongodb.internal.binding.StaticBindingContext
+import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.bulk.InsertRequest
 import com.mongodb.internal.bulk.WriteRequestWithIndex
 import com.mongodb.internal.connection.Connection
-import com.mongodb.internal.connection.NoOpSessionContext
-import com.mongodb.internal.connection.OperationContext
 import com.mongodb.internal.connection.SplittablePayload
+import com.mongodb.internal.time.Timeout
 import com.mongodb.internal.validator.NoOpFieldNameValidator
 import org.bson.BsonArray
 import org.bson.BsonBinary
@@ -60,27 +59,32 @@ class CryptConnectionSpecification extends Specification {
         def crypt = Mock(Crypt)
         def cryptConnection = new CryptConnection(wrappedConnection, crypt)
         def codec = new DocumentCodec()
+        def timeoutContext = Mock(TimeoutContext)
+        def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext)
+        def operationTimeout = Mock(Timeout)
+        timeoutContext.getTimeout() >> operationTimeout
+
         def encryptedCommand = toRaw(new BsonDocument('find', new BsonString('test'))
                 .append('ssid', new BsonBinary(6 as byte, new byte[10])))
 
         def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1))
                 .append('cursor',
-                new BsonDocument('firstBatch',
-                        new BsonArray([new BsonDocument('_id', new BsonInt32(1))
-                                               .append('ssid', new BsonBinary(6 as byte, new byte[10]))]))))
+                        new BsonDocument('firstBatch',
+                                new BsonArray([new BsonDocument('_id', new BsonInt32(1))
+                                                       .append('ssid', new BsonBinary(6 as byte, new byte[10]))]))))
 
         def decryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1))
                 .append('cursor', new BsonDocument('firstBatch',
-                new BsonArray([new BsonDocument('_id', new BsonInt32(1))
-                                       .append('ssid', new BsonString('555-55-5555'))]))))
-        def operationContext = new OperationContext()
-        def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, null, IgnorableRequestContext.INSTANCE, operationContext)
+                        new BsonArray([new BsonDocument('_id', new BsonInt32(1))
+                                               .append('ssid', new BsonString('555-55-5555'))]))))
+
 
         when:
+
         def response = cryptConnection.command('db',
                 new BsonDocumentWrapper(new Document('find', 'test')
                         .append('filter', new Document('ssid', '555-55-5555')), codec),
-                new NoOpFieldNameValidator(), ReadPreference.primary(), codec, context)
+                new NoOpFieldNameValidator(), ReadPreference.primary(), codec, operationContext)
 
         then:
         _ * wrappedConnection.getDescription() >> {
@@ -88,14 +92,14 @@ class CryptConnectionSpecification extends Specification {
                     1000, 1024 * 16_000, 1024 * 48_000, [])
         }
         1 * crypt.encrypt('db', toRaw(new BsonDocument('find', new BsonString('test'))
-                .append('filter', new BsonDocument('ssid', new BsonString('555-55-5555'))))) >> {
-             encryptedCommand
+                .append('filter', new BsonDocument('ssid', new BsonString('555-55-5555')))), operationTimeout) >> {
+            encryptedCommand
         }
         1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(),
-                _ as RawBsonDocumentCodec, context, true, null, null) >> {
+                _ as RawBsonDocumentCodec, operationContext, true, null, null) >> {
             encryptedResponse
         }
-        1 * crypt.decrypt(encryptedResponse) >> {
+        1 * crypt.decrypt(encryptedResponse, operationTimeout) >> {
             decryptedResponse
         }
         response == rawToDocument(decryptedResponse)
@@ -121,14 +125,16 @@ class CryptConnectionSpecification extends Specification {
 
         def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1)))
         def decryptedResponse = encryptedResponse
-        def operationContext = new OperationContext()
-        def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, null, IgnorableRequestContext.INSTANCE, operationContext)
+        def timeoutContext = Mock(TimeoutContext)
+        def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext)
+        def operationTimeout = Mock(Timeout)
+        timeoutContext.getTimeout() >> operationTimeout
 
         when:
         def response = cryptConnection.command('db',
                 new BsonDocumentWrapper(new Document('insert', 'test'), codec),
                 new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(),
-                context, true, payload, new NoOpFieldNameValidator(),)
+                operationContext, true, payload, new NoOpFieldNameValidator(),)
 
         then:
         _ * wrappedConnection.getDescription() >> {
@@ -141,14 +147,14 @@ class CryptConnectionSpecification extends Specification {
                                 new BsonDocument('_id', new BsonInt32(1))
                                         .append('ssid', new BsonString('555-55-5555'))
                                         .append('b', new BsonBinary(bytes))
-                        ])))) >> {
+                        ]))), operationTimeout) >> {
             encryptedCommand
         }
         1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(),
-                _ as RawBsonDocumentCodec, context, true, null, null,) >> {
+                _ as RawBsonDocumentCodec, operationContext, true, null, null,) >> {
             encryptedResponse
         }
-        1 * crypt.decrypt(encryptedResponse) >> {
+        1 * crypt.decrypt(encryptedResponse, operationTimeout) >> {
             decryptedResponse
         }
         response == rawToBsonDocument(decryptedResponse)
@@ -176,13 +182,15 @@ class CryptConnectionSpecification extends Specification {
 
         def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1)))
         def decryptedResponse = encryptedResponse
-        def operationContext = new OperationContext()
-        def context = new StaticBindingContext(NoOpSessionContext.INSTANCE, null, IgnorableRequestContext.INSTANCE, operationContext)
+        def timeoutContext = Mock(TimeoutContext)
+        def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext)
+        def operationTimeout = Mock(Timeout)
+        timeoutContext.getTimeout() >> operationTimeout
 
         when:
         def response = cryptConnection.command('db',
                 new BsonDocumentWrapper(new Document('insert', 'test'), codec),
-                new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), context, true, payload,
+                new NoOpFieldNameValidator(), ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload,
                 new NoOpFieldNameValidator())
 
         then:
@@ -195,14 +203,14 @@ class CryptConnectionSpecification extends Specification {
                         new BsonArray([
                                 new BsonDocument('_id', new BsonInt32(1)),
                                 new BsonDocument('_id', new BsonInt32(2))
-                        ])))) >> {
+                        ]))), operationTimeout) >> {
             encryptedCommand
         }
         1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(),
-                _ as RawBsonDocumentCodec, context, true, null, null,) >> {
+                _ as RawBsonDocumentCodec, operationContext, true, null, null,) >> {
             encryptedResponse
         }
-        1 * crypt.decrypt(encryptedResponse) >> {
+        1 * crypt.decrypt(encryptedResponse, operationTimeout) >> {
             decryptedResponse
         }
         response == rawToBsonDocument(decryptedResponse)
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy
index 8a7898581a2..3baac05653a 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy
@@ -37,6 +37,7 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -55,7 +56,7 @@ class DistinctIterableSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null])
         def distinctIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                executor, 'field', new BsonDocument(), true)
+                executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS)
 
         when: 'default input should be as expected'
         distinctIterable.iterator()
@@ -69,14 +70,14 @@ class DistinctIterableSpecification extends Specification {
         readPreference == secondary()
 
         when: 'overriding initial options'
-        distinctIterable.filter(new Document('field', 1)).maxTime(999, MILLISECONDS).batchSize(99).collation(collation).iterator()
+        distinctIterable.filter(new Document('field', 1)).maxTime(100, MILLISECONDS).batchSize(99).collation(collation).iterator()
 
         operation = executor.getReadOperation() as DistinctOperation<Document>
 
         then: 'should use the overrides'
-        expect operation, isTheSameAs(new DistinctOperation<Document>(namespace, 'field', new DocumentCodec())
-                .filter(new BsonDocument('field', new BsonInt32(1)))
-                .maxTime(999, MILLISECONDS).collation(collation).retryReads(true))
+        expect operation, isTheSameAs(
+                new DistinctOperation<Document>(namespace, 'field', new DocumentCodec())
+                        .filter(new BsonDocument('field', new BsonInt32(1))).collation(collation).retryReads(true))
     }
 
     def 'should use ClientSession'() {
@@ -86,7 +87,7 @@ class DistinctIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([batchCursor, batchCursor])
         def distinctIterable = new DistinctIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, executor, 'field', new BsonDocument())
+                readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS)
 
         when:
         distinctIterable.first()
@@ -109,7 +110,7 @@ class DistinctIterableSpecification extends Specification {
         def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()])
         def executor = new TestOperationExecutor([new MongoException('failure')])
         def distinctIterable = new DistinctIterableImpl(null, namespace, Document, BsonDocument, codecRegistry, readPreference,
-                readConcern, executor, 'field', new BsonDocument())
+                readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS)
 
         when: 'The operation fails with an exception'
         distinctIterable.iterator()
@@ -145,7 +146,7 @@ class DistinctIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
         def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, ReadConcern.LOCAL,
-                executor, 'field', new BsonDocument())
+                executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -189,7 +190,7 @@ class DistinctIterableSpecification extends Specification {
         when:
         def batchSize = 5
         def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                Stub(OperationExecutor), 'field', new BsonDocument())
+                Stub(OperationExecutor), 'field', new BsonDocument(), true, TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy
index 98848a84dfa..e2f7cae2d62 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy
@@ -16,7 +16,6 @@
 
 package com.mongodb.client.internal
 
-
 import com.mongodb.CursorType
 import com.mongodb.Function
 import com.mongodb.MongoException
@@ -39,10 +38,10 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
-import static java.util.concurrent.TimeUnit.SECONDS
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders
 import static spock.util.matcher.HamcrestSupport.expect
 
@@ -59,11 +58,9 @@ class FindIterableSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null, null])
         def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                executor, new Document('filter', 1), true)
+                executor, new Document('filter', 1), true, TIMEOUT_SETTINGS)
                 .sort(new Document('sort', 1))
                 .projection(new Document('projection', 1))
-                .maxTime(10, SECONDS)
-                .maxAwaitTime(20, SECONDS)
                 .batchSize(100)
                 .limit(100)
                 .skip(10)
@@ -90,8 +87,6 @@ class FindIterableSpecification extends Specification {
                 .filter(new BsonDocument('filter', new BsonInt32(1)))
                 .sort(new BsonDocument('sort', new BsonInt32(1)))
                 .projection(new BsonDocument('projection', new BsonInt32(1)))
-                .maxTime(10000, MILLISECONDS)
-                .maxAwaitTime(20000, MILLISECONDS)
                 .batchSize(100)
                 .limit(100)
                 .skip(10)
@@ -111,8 +106,8 @@ class FindIterableSpecification extends Specification {
         findIterable.filter(new Document('filter', 2))
                 .sort(new Document('sort', 2))
                 .projection(new Document('projection', 2))
-                .maxTime(9, SECONDS)
-                .maxAwaitTime(18, SECONDS)
+                .maxTime(101, MILLISECONDS)
+                .maxAwaitTime(1001, MILLISECONDS)
                 .batchSize(99)
                 .limit(99)
                 .skip(9)
@@ -132,32 +127,31 @@ class FindIterableSpecification extends Specification {
         operation = executor.getReadOperation() as FindOperation<Document>
 
         then: 'should use the overrides'
-        expect operation, isTheSameAs(new FindOperation<Document>(namespace, new DocumentCodec())
-                .filter(new BsonDocument('filter', new BsonInt32(2)))
-                .sort(new BsonDocument('sort', new BsonInt32(2)))
-                .projection(new BsonDocument('projection', new BsonInt32(2)))
-                .maxTime(9000, MILLISECONDS)
-                .maxAwaitTime(18000, MILLISECONDS)
-                .batchSize(99)
-                .limit(99)
-                .skip(9)
-                .cursorType(CursorType.Tailable)
-                .noCursorTimeout(true)
-                .partial(true)
-                .collation(collation)
-                .comment(new BsonString('alt comment'))
-                .hint(new BsonDocument('hint', new BsonInt32(2)))
-                .min(new BsonDocument('min', new BsonInt32(2)))
-                .max(new BsonDocument('max', new BsonInt32(2)))
-                .returnKey(true)
-                .showRecordId(true)
-                .allowDiskUse(true)
-                .retryReads(true)
+        expect operation, isTheSameAs(
+                new FindOperation<Document>(namespace, new DocumentCodec())
+                        .filter(new BsonDocument('filter', new BsonInt32(2)))
+                        .sort(new BsonDocument('sort', new BsonInt32(2)))
+                        .projection(new BsonDocument('projection', new BsonInt32(2)))
+                        .batchSize(99)
+                        .limit(99)
+                        .skip(9)
+                        .cursorType(CursorType.Tailable)
+                        .noCursorTimeout(true)
+                        .partial(true)
+                        .collation(collation)
+                        .comment(new BsonString('alt comment'))
+                        .hint(new BsonDocument('hint', new BsonInt32(2)))
+                        .min(new BsonDocument('min', new BsonInt32(2)))
+                        .max(new BsonDocument('max', new BsonInt32(2)))
+                        .returnKey(true)
+                        .showRecordId(true)
+                        .allowDiskUse(true)
+                        .retryReads(true)
         )
 
         when: 'passing nulls to nullable methods'
         new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                executor, new Document('filter', 1), true)
+                executor, new Document('filter', 1), true, TIMEOUT_SETTINGS)
                 .filter(null as Bson)
                 .collation(null)
                 .projection(null)
@@ -182,7 +176,7 @@ class FindIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([batchCursor, batchCursor])
         def findIterable = new FindIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                executor, new Document('filter', 1))
+                executor, new Document('filter', 1), true, TIMEOUT_SETTINGS)
 
         when:
         findIterable.first()
@@ -204,7 +198,7 @@ class FindIterableSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null])
         def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                executor, new Document('filter', 1), true)
+                executor, new Document('filter', 1), true, TIMEOUT_SETTINGS)
 
         when:
         findIterable.filter(new Document('filter', 1))
@@ -244,7 +238,7 @@ class FindIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
         def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                executor, new Document())
+                executor, new Document(), true, TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -288,7 +282,7 @@ class FindIterableSpecification extends Specification {
         when:
         def batchSize = 5
         def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, Stub(OperationExecutor), new Document())
+                readConcern, Stub(OperationExecutor), new Document(), true, TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
@@ -310,7 +304,7 @@ class FindIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([cursor])
         def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                executor, new Document())
+                executor, new Document(), true, TIMEOUT_SETTINGS)
 
         when:
         mongoIterable.forEach(new Consumer<Document>() {
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy
index 3756a80094f..559935c05ee 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy
@@ -30,11 +30,12 @@ import org.bson.codecs.DocumentCodecProvider
 import org.bson.codecs.ValueCodecProvider
 import spock.lang.Specification
 
+import java.util.concurrent.TimeUnit
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
-import static java.util.concurrent.TimeUnit.MILLISECONDS
 import static org.bson.codecs.configuration.CodecRegistries.fromProviders
 import static spock.util.matcher.HamcrestSupport.expect
 
@@ -48,12 +49,11 @@ class ListCollectionsIterableSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null, null, null])
         def listCollectionIterable = new ListCollectionsIterableImpl<Document>(null, 'db', false, Document, codecRegistry,
-                readPreference, executor, true)
+                readPreference, executor, true, TIMEOUT_SETTINGS)
                 .filter(new Document('filter', 1))
                 .batchSize(100)
-                .maxTime(1000, MILLISECONDS)
         def listCollectionNamesIterable = new ListCollectionsIterableImpl<Document>(null, 'db', true, Document, codecRegistry,
-                readPreference, executor, true)
+                readPreference, executor, true, TIMEOUT_SETTINGS)
 
         when: 'default input should be as expected'
         listCollectionIterable.iterator()
@@ -63,19 +63,19 @@ class ListCollectionsIterableSpecification extends Specification {
 
         then:
         expect operation, isTheSameAs(new ListCollectionsOperation<Document>('db', new DocumentCodec())
-                .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100).maxTime(1000, MILLISECONDS)
+                .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100)
                 .retryReads(true)
                 .authorizedCollections(false))
         readPreference == secondary()
 
         when: 'overriding initial options'
-        listCollectionIterable.filter(new Document('filter', 2)).batchSize(99).maxTime(999, MILLISECONDS).iterator()
+        listCollectionIterable.filter(new Document('filter', 2)).batchSize(99).maxTime(100, TimeUnit.MILLISECONDS).iterator()
 
         operation = executor.getReadOperation() as ListCollectionsOperation<Document>
 
         then: 'should use the overrides'
         expect operation, isTheSameAs(new ListCollectionsOperation<Document>('db', new DocumentCodec())
-                .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99).maxTime(999, MILLISECONDS)
+                .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99)
                 .retryReads(true))
 
         when: 'requesting collection names only'
@@ -105,7 +105,7 @@ class ListCollectionsIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([batchCursor, batchCursor])
         def listCollectionIterable = new ListCollectionsIterableImpl<Document>(clientSession, 'db', false, Document, codecRegistry,
-                readPreference, executor, true)
+                readPreference, executor, true, TIMEOUT_SETTINGS)
 
         when:
         listCollectionIterable.first()
@@ -145,7 +145,7 @@ class ListCollectionsIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
         def mongoIterable = new ListCollectionsIterableImpl<Document>(null, 'db', false, Document, codecRegistry, readPreference,
-                executor, true)
+                executor, true, TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -189,7 +189,7 @@ class ListCollectionsIterableSpecification extends Specification {
         when:
         def batchSize = 5
         def mongoIterable = new ListCollectionsIterableImpl<Document>(null, 'db', false, Document, codecRegistry, readPreference,
-                Stub(OperationExecutor), true)
+                Stub(OperationExecutor), true, TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy
index bfe4adb26f9..8df91709486 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy
@@ -30,6 +30,7 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -45,8 +46,8 @@ class ListDatabasesIterableSpecification extends Specification {
     def 'should build the expected listCollectionOperation'() {
         given:
         def executor = new TestOperationExecutor([null, null, null])
-        def listDatabaseIterable = new ListDatabasesIterableImpl<Document>(null, Document, codecRegistry, readPreference, executor)
-                .maxTime(1000, MILLISECONDS)
+        def listDatabaseIterable = new ListDatabasesIterableImpl<Document>(null, Document, codecRegistry, readPreference, executor, true,
+                TIMEOUT_SETTINGS)
 
         when: 'default input should be as expected'
         listDatabaseIterable.iterator()
@@ -55,26 +56,26 @@ class ListDatabasesIterableSpecification extends Specification {
         def readPreference = executor.getReadPreference()
 
         then:
-        expect operation, isTheSameAs(new ListDatabasesOperation<Document>(new DocumentCodec()).maxTime(1000, MILLISECONDS)
+        expect operation, isTheSameAs(new ListDatabasesOperation<Document>(new DocumentCodec())
                 .retryReads(true))
         readPreference == secondary()
 
         when: 'overriding initial options'
-        listDatabaseIterable.maxTime(999, MILLISECONDS).filter(Document.parse('{a: 1}')).nameOnly(true).iterator()
+        listDatabaseIterable.maxTime(100, MILLISECONDS).filter(Document.parse('{a: 1}')).nameOnly(true).iterator()
 
         operation = executor.getReadOperation() as ListDatabasesOperation<Document>
 
         then: 'should use the overrides'
-        expect operation, isTheSameAs(new ListDatabasesOperation<Document>(new DocumentCodec()).maxTime(999, MILLISECONDS)
+        expect operation, isTheSameAs(new ListDatabasesOperation<Document>(new DocumentCodec())
                 .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).retryReads(true))
 
         when: 'overriding initial options'
-        listDatabaseIterable.maxTime(101, MILLISECONDS).filter(Document.parse('{a: 1}')).authorizedDatabasesOnly(true).iterator()
+        listDatabaseIterable.filter(Document.parse('{a: 1}')).authorizedDatabasesOnly(true).iterator()
 
         operation = executor.getReadOperation() as ListDatabasesOperation<Document>
 
         then: 'should use the overrides'
-        expect operation, isTheSameAs(new ListDatabasesOperation<Document>(new DocumentCodec()).maxTime(101, MILLISECONDS)
+        expect operation, isTheSameAs(new ListDatabasesOperation<Document>(new DocumentCodec())
                 .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).authorizedDatabasesOnly(true).retryReads(true))
     }
 
@@ -99,7 +100,8 @@ class ListDatabasesIterableSpecification extends Specification {
             }
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
-        def mongoIterable = new ListDatabasesIterableImpl<Document>(null, Document, codecRegistry, readPreference, executor)
+        def mongoIterable = new ListDatabasesIterableImpl<Document>(null, Document, codecRegistry, readPreference, executor,
+                true, TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -143,7 +145,7 @@ class ListDatabasesIterableSpecification extends Specification {
         when:
         def batchSize = 5
         def mongoIterable = new ListDatabasesIterableImpl<Document>(null, Document, codecRegistry, readPreference,
-                Stub(OperationExecutor))
+                Stub(OperationExecutor), true, TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy
index d1090fe1525..d11c59d46d2 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy
@@ -31,6 +31,7 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -47,8 +48,8 @@ class ListIndexesIterableSpecification extends Specification {
     def 'should build the expected listIndexesOperation'() {
         given:
         def executor = new TestOperationExecutor([null, null])
-        def listIndexesIterable = new ListIndexesIterableImpl<Document>(null, namespace, Document, codecRegistry, readPreference, executor)
-                .batchSize(100).maxTime(1000, MILLISECONDS)
+        def listIndexesIterable = new ListIndexesIterableImpl<Document>(null, namespace, Document, codecRegistry, readPreference,
+                executor, true, TIMEOUT_SETTINGS).batchSize(100)
 
         when: 'default input should be as expected'
         listIndexesIterable.iterator()
@@ -58,19 +59,19 @@ class ListIndexesIterableSpecification extends Specification {
 
         then:
         expect operation, isTheSameAs(new ListIndexesOperation<Document>(namespace, new DocumentCodec())
-                .batchSize(100).maxTime(1000, MILLISECONDS).retryReads(true))
+                .batchSize(100).retryReads(true))
         readPreference == secondary()
 
         when: 'overriding initial options'
         listIndexesIterable.batchSize(99)
-                .maxTime(999, MILLISECONDS)
+                .maxTime(100, MILLISECONDS)
                 .iterator()
 
         operation = executor.getReadOperation() as ListIndexesOperation<Document>
 
         then: 'should use the overrides'
         expect operation, isTheSameAs(new ListIndexesOperation<Document>(namespace, new DocumentCodec())
-                .batchSize(99).maxTime(999, MILLISECONDS).retryReads(true))
+                .batchSize(99).retryReads(true))
     }
 
     def 'should use ClientSession'() {
@@ -80,7 +81,7 @@ class ListIndexesIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([batchCursor, batchCursor])
         def listIndexesIterable = new ListIndexesIterableImpl<Document>(clientSession, namespace, Document, codecRegistry, readPreference,
-                executor)
+                executor, true, TIMEOUT_SETTINGS)
 
         when:
         listIndexesIterable.first()
@@ -120,7 +121,8 @@ class ListIndexesIterableSpecification extends Specification {
             }
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
-        def mongoIterable = new ListIndexesIterableImpl<Document>(null, namespace, Document, codecRegistry, readPreference, executor)
+        def mongoIterable = new ListIndexesIterableImpl<Document>(null, namespace, Document, codecRegistry, readPreference,
+                executor, true, TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -164,7 +166,7 @@ class ListIndexesIterableSpecification extends Specification {
         when:
         def batchSize = 5
         def mongoIterable = new ListIndexesIterableImpl<Document>(null, namespace, Document, codecRegistry, readPreference,
-                Stub(OperationExecutor))
+                Stub(OperationExecutor), true, TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy
index c24f479b784..b6cb01d31cb 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy
@@ -42,6 +42,7 @@ import spock.lang.Specification
 
 import java.util.function.Consumer
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.secondary
 import static java.util.concurrent.TimeUnit.MILLISECONDS
@@ -62,7 +63,7 @@ class MapReduceIterableSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null])
         def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, 'map', 'reduce')
+                readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS)
 
         when: 'default input should be as expected'
         mapReduceIterable.iterator()
@@ -71,8 +72,8 @@ class MapReduceIterableSpecification extends Specification {
         def readPreference = executor.getReadPreference()
 
         then:
-        expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation<Document>(namespace, new BsonJavaScript('map'),
-                new BsonJavaScript('reduce'), new DocumentCodec())
+        expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation<Document>(namespace,
+                new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec())
                 .verbose(true))
         readPreference == secondary()
 
@@ -80,7 +81,7 @@ class MapReduceIterableSpecification extends Specification {
         mapReduceIterable.filter(new Document('filter', 1))
                 .finalizeFunction('finalize')
                 .limit(999)
-                .maxTime(999, MILLISECONDS)
+                .maxTime(100, MILLISECONDS)
                 .scope(new Document('scope', 1))
                 .sort(new Document('sort', 1))
                 .verbose(false)
@@ -90,12 +91,11 @@ class MapReduceIterableSpecification extends Specification {
         operation = (executor.getReadOperation() as MapReduceIterableImpl.WrappedMapReduceReadOperation<Document>).getOperation()
 
         then: 'should use the overrides'
-        expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation<Document>(namespace, new BsonJavaScript('map'),
-                new BsonJavaScript('reduce'), new DocumentCodec())
+        expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation<Document>(namespace,
+                new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec())
                 .filter(new BsonDocument('filter', new BsonInt32(1)))
                 .finalizeFunction(new BsonJavaScript('finalize'))
                 .limit(999)
-                .maxTime(999, MILLISECONDS)
                 .scope(new BsonDocument('scope', new BsonInt32(1)))
                 .sort(new BsonDocument('sort', new BsonInt32(1)))
                 .verbose(false)
@@ -109,14 +109,14 @@ class MapReduceIterableSpecification extends Specification {
 
         when: 'mapReduce to a collection'
         def collectionNamespace = new MongoNamespace('dbName', 'collName')
-        def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern,
-                writeConcern, executor, 'map', 'reduce')
+        def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry,
+                readPreference, readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS)
                 .collectionName(collectionNamespace.getCollectionName())
                 .databaseName(collectionNamespace.getDatabaseName())
                 .filter(new Document('filter', 1))
                 .finalizeFunction('finalize')
                 .limit(999)
-                .maxTime(999, MILLISECONDS)
+                .maxTime(100, MILLISECONDS)
                 .scope(new Document('scope', 1))
                 .sort(new Document('sort', 1))
                 .verbose(false)
@@ -128,13 +128,12 @@ class MapReduceIterableSpecification extends Specification {
         mapReduceIterable.iterator()
 
         def operation = executor.getWriteOperation() as MapReduceToCollectionOperation
-        def expectedOperation = new MapReduceToCollectionOperation(namespace, new BsonJavaScript('map'),
-                new BsonJavaScript('reduce'), 'collName', writeConcern)
+        def expectedOperation = new MapReduceToCollectionOperation(namespace,
+                new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'collName', writeConcern)
                 .databaseName(collectionNamespace.getDatabaseName())
                 .filter(new BsonDocument('filter', new BsonInt32(1)))
                 .finalizeFunction(new BsonJavaScript('finalize'))
                 .limit(999)
-                .maxTime(999, MILLISECONDS)
                 .scope(new BsonDocument('scope', new BsonInt32(1)))
                 .sort(new BsonDocument('sort', new BsonInt32(1)))
                 .verbose(false)
@@ -170,7 +169,7 @@ class MapReduceIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([batchCursor, batchCursor])
         def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, 'map', 'reduce')
+                readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS)
 
         when:
         mapReduceIterable.first()
@@ -195,7 +194,7 @@ class MapReduceIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null])
         def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, 'map', 'reduce')
+                readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS)
                 .collectionName('collName')
 
         when:
@@ -228,7 +227,7 @@ class MapReduceIterableSpecification extends Specification {
         def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()])
         def executor = new TestOperationExecutor([new MongoException('failure')])
         def mapReduceIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry,
-                readPreference, readConcern, writeConcern, executor, 'map', 'reduce')
+                readPreference, readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS)
 
 
         when: 'The operation fails with an exception'
@@ -245,7 +244,7 @@ class MapReduceIterableSpecification extends Specification {
 
         when: 'a codec is missing'
         new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor,
-                'map', 'reduce').iterator()
+                'map', 'reduce', TIMEOUT_SETTINGS).iterator()
 
         then:
         thrown(CodecConfigurationException)
@@ -274,7 +273,7 @@ class MapReduceIterableSpecification extends Specification {
         }
         def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()])
         def mongoIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference,
-                readConcern, writeConcern, executor, 'map', 'reduce')
+                readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS)
 
         when:
         def results = mongoIterable.first()
@@ -318,7 +317,7 @@ class MapReduceIterableSpecification extends Specification {
         when:
         def batchSize = 5
         def mongoIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference,
-                readConcern, writeConcern, Stub(OperationExecutor), 'map', 'reduce')
+                readConcern, writeConcern, Stub(OperationExecutor), 'map', 'reduce', TIMEOUT_SETTINGS)
 
         then:
         mongoIterable.getBatchSize() == null
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy
new file mode 100644
index 00000000000..62c16330950
--- /dev/null
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.internal
+
+import com.mongodb.MongoClientSettings
+import com.mongodb.MongoNamespace
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.client.ClientSession
+import com.mongodb.client.MongoClient
+import com.mongodb.client.MongoIterable
+import com.mongodb.internal.TimeoutSettings
+import com.mongodb.internal.client.model.changestream.ChangeStreamLevel
+import com.mongodb.internal.connection.Cluster
+import com.mongodb.internal.session.ServerSessionPool
+import org.bson.BsonDocument
+import org.bson.Document
+import org.bson.codecs.UuidCodec
+import org.bson.codecs.ValueCodecProvider
+import org.bson.codecs.configuration.CodecRegistry
+import spock.lang.Specification
+
+import java.util.concurrent.TimeUnit
+
+import static com.mongodb.CustomMatchers.isTheSameAs
+import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry
+import static com.mongodb.ReadPreference.primary
+import static com.mongodb.ReadPreference.secondary
+import static com.mongodb.client.internal.TestHelper.execute
+import static org.bson.UuidRepresentation.UNSPECIFIED
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders
+import static spock.util.matcher.HamcrestSupport.expect
+
+class MongoClusterSpecification extends Specification {
+
+    private static final CodecRegistry CODEC_REGISTRY = fromProviders(new ValueCodecProvider())
+    private static final MongoClientSettings CLIENT_SETTINGS = MongoClientSettings.builder().build()
+    private static final TimeoutSettings TIMEOUT_SETTINGS = TimeoutSettings.create(CLIENT_SETTINGS)
+    private final Cluster cluster = Stub(Cluster)
+    private final MongoClient originator =  Stub(MongoClient)
+    private final ServerSessionPool serverSessionPool = Stub(ServerSessionPool)
+    private final OperationExecutor operationExecutor = Stub(OperationExecutor)
+
+    def 'should pass the correct settings to getDatabase'() {
+        given:
+        def settings = MongoClientSettings.builder()
+                .readPreference(secondary())
+                .writeConcern(WriteConcern.MAJORITY)
+                .readConcern(ReadConcern.MAJORITY)
+                .retryWrites(true)
+                .codecRegistry(CODEC_REGISTRY)
+                .build()
+        def operationExecutor = new TestOperationExecutor([])
+        def mongoClientCluster = createMongoCluster(settings, operationExecutor)
+
+        when:
+        def database = mongoClientCluster.getDatabase('name')
+
+        then:
+        expect database, isTheSameAs(expectedDatabase)
+
+        where:
+        expectedDatabase << new MongoDatabaseImpl('name', CODEC_REGISTRY, secondary(),
+                WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null,
+                TIMEOUT_SETTINGS, new TestOperationExecutor([]))
+    }
+
+    def 'should behave correctly when using withCodecRegistry'() {
+        given:
+        def newCodecRegistry = fromProviders(new ValueCodecProvider())
+
+        when:
+        def mongoCluster = createMongoCluster().withCodecRegistry(newCodecRegistry)
+
+        then:
+        (mongoCluster.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == UNSPECIFIED
+        expect mongoCluster, isTheSameAs(createMongoCluster(
+                MongoClientSettings.builder(CLIENT_SETTINGS).codecRegistry(newCodecRegistry).build()))
+    }
+
+    def 'should behave correctly when using withReadPreference'() {
+        given:
+        def newReadPreference = ReadPreference.secondaryPreferred()
+
+        when:
+        def mongoCluster =  createMongoCluster().withReadPreference(newReadPreference)
+
+        then:
+        mongoCluster.getReadPreference() == newReadPreference
+        expect mongoCluster, isTheSameAs(
+                createMongoCluster(MongoClientSettings.builder(CLIENT_SETTINGS).readPreference(newReadPreference).build()))
+    }
+
+    def 'should behave correctly when using withWriteConcern'() {
+        given:
+        def newWriteConcern = WriteConcern.MAJORITY
+
+        when:
+        def mongoCluster =  createMongoCluster().withWriteConcern(newWriteConcern)
+
+        then:
+        mongoCluster.getWriteConcern() == newWriteConcern
+        expect mongoCluster, isTheSameAs(createMongoCluster(
+                MongoClientSettings.builder(CLIENT_SETTINGS).writeConcern(newWriteConcern).build()))
+    }
+
+    def 'should behave correctly when using withReadConcern'() {
+        given:
+        def newReadConcern = ReadConcern.MAJORITY
+
+        when:
+        def mongoCluster =  createMongoCluster().withReadConcern(newReadConcern)
+
+        then:
+        mongoCluster.getReadConcern() == newReadConcern
+        expect mongoCluster, isTheSameAs(createMongoCluster(
+                MongoClientSettings.builder(CLIENT_SETTINGS).readConcern(newReadConcern).build()))
+    }
+
+    def 'should behave correctly when using withTimeout'() {
+        when:
+        def mongoCluster =  createMongoCluster().withTimeout(10_000, TimeUnit.MILLISECONDS)
+
+        then:
+        mongoCluster.getTimeout(TimeUnit.MILLISECONDS) == 10_000
+        expect mongoCluster, isTheSameAs(createMongoCluster(MongoClientSettings.builder(CLIENT_SETTINGS)
+                .timeout(10_000, TimeUnit.MILLISECONDS).build()))
+
+        when:
+        createMongoCluster().withTimeout(500, TimeUnit.NANOSECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+
+    def 'should use ListDatabasesIterableImpl correctly'() {
+        given:
+        def executor = new TestOperationExecutor([null, null])
+        def mongoCluster = createMongoCluster(executor)
+        def listDatabasesMethod = mongoCluster.&listDatabases
+        def listDatabasesNamesMethod = mongoCluster.&listDatabaseNames
+
+        when:
+        def listDatabasesIterable = execute(listDatabasesMethod, session)
+
+        then:
+        expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, Document,
+                CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS))
+
+        when:
+        listDatabasesIterable = execute(listDatabasesMethod, session, BsonDocument)
+
+        then:
+        expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument,
+                CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS))
+
+        when:
+        def listDatabaseNamesIterable = execute(listDatabasesNamesMethod, session) as MongoIterable<String>
+
+        then:
+        // listDatabaseNamesIterable is a MappingIterable, so we must unwrap the mapped iterable it contains
+        expect listDatabaseNamesIterable.getMapped(), isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument,
+                CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS)
+                .nameOnly(true))
+
+        where:
+        session << [null, Stub(ClientSession)]
+    }
+
+    def 'should create ChangeStreamIterable correctly'() {
+        given:
+        def executor = new TestOperationExecutor([])
+        def namespace = new MongoNamespace('admin', 'ignored')
+        def settings = MongoClientSettings.builder()
+                .readPreference(secondary())
+                .readConcern(ReadConcern.MAJORITY)
+                .codecRegistry(getDefaultCodecRegistry())
+                .build()
+        def readPreference = settings.getReadPreference()
+        def readConcern = settings.getReadConcern()
+        def mongoCluster = createMongoCluster(settings, executor)
+        def watchMethod = mongoCluster.&watch
+
+        when:
+        def changeStreamIterable = execute(watchMethod, session)
+
+        then:
+        expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry,
+                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS),
+                ['codec'])
+
+        when:
+        changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)])
+
+        then:
+        expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry,
+                readPreference, readConcern, executor, [new Document('$match', 1)], Document, ChangeStreamLevel.CLIENT,
+                true, TIMEOUT_SETTINGS), ['codec'])
+
+        when:
+        changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument)
+
+        then:
+        expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry,
+                readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument,
+                ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), ['codec'])
+
+        where:
+        session << [null, Stub(ClientSession)]
+    }
+
+    def 'should validate the ChangeStreamIterable pipeline data correctly'() {
+        given:
+        def executor = new TestOperationExecutor([])
+        def mongoCluster = createMongoCluster(executor)
+
+        when:
+        mongoCluster.watch((Class) null)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        mongoCluster.watch([null]).into([])
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    MongoClusterImpl createMongoCluster() {
+        createMongoCluster(CLIENT_SETTINGS)
+    }
+
+    MongoClusterImpl createMongoCluster(final MongoClientSettings settings) {
+        createMongoCluster(settings, operationExecutor)
+    }
+
+    MongoClusterImpl createMongoCluster(final OperationExecutor operationExecutor) {
+        createMongoCluster(CLIENT_SETTINGS, operationExecutor)
+    }
+
+    MongoClusterImpl createMongoCluster(final MongoClientSettings settings, final OperationExecutor operationExecutor) {
+        new MongoClusterImpl(null, cluster, settings.codecRegistry, null, null,
+                originator, operationExecutor, settings.readConcern, settings.readPreference, settings.retryReads, settings.retryWrites,
+                null, serverSessionPool, TimeoutSettings.create(settings), settings.uuidRepresentation, settings.writeConcern)
+    }
+}
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy
index 5951a5b6589..2fba3b90a0a 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy
@@ -92,6 +92,7 @@ import spock.lang.Specification
 
 import java.util.concurrent.TimeUnit
 
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.ReadPreference.secondary
@@ -122,7 +123,7 @@ class MongoCollectionSpecification extends Specification {
     def 'should return the correct name from getName'() {
         given:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true,
-                true, readConcern, JAVA_LEGACY, null, new TestOperationExecutor([null]))
+                true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([null]))
 
         expect:
         collection.getNamespace() == namespace
@@ -135,12 +136,12 @@ class MongoCollectionSpecification extends Specification {
 
         when:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor).withDocumentClass(newClass)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withDocumentClass(newClass)
 
         then:
         collection.getDocumentClass() == newClass
         expect collection, isTheSameAs(new MongoCollectionImpl(namespace, newClass, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor))
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor))
     }
 
     def 'should behave correctly when using withCodecRegistry'() {
@@ -150,12 +151,12 @@ class MongoCollectionSpecification extends Specification {
 
         when:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, C_SHARP_LEGACY, null, executor).withCodecRegistry(newCodecRegistry)
+                true, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor).withCodecRegistry(newCodecRegistry)
 
         then:
         (collection.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY
         expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, collection.getCodecRegistry(), readPreference,
-                ACKNOWLEDGED, true, true, readConcern, C_SHARP_LEGACY, null, executor))
+                ACKNOWLEDGED, true, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor))
     }
 
     def 'should behave correctly when using withReadPreference'() {
@@ -165,12 +166,12 @@ class MongoCollectionSpecification extends Specification {
 
         when:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor).withReadPreference(newReadPreference)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withReadPreference(newReadPreference)
 
         then:
         collection.getReadPreference() == newReadPreference
         expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, newReadPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor))
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor))
     }
 
     def 'should behave correctly when using withWriteConcern'() {
@@ -180,12 +181,12 @@ class MongoCollectionSpecification extends Specification {
 
         when:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor).withWriteConcern(newWriteConcern)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withWriteConcern(newWriteConcern)
 
         then:
         collection.getWriteConcern() == newWriteConcern
         expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, newWriteConcern,
-                true, true, readConcern, JAVA_LEGACY, null, executor))
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor))
     }
 
     def 'should behave correctly when using withReadConcern'() {
@@ -195,12 +196,33 @@ class MongoCollectionSpecification extends Specification {
 
         when:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor).withReadConcern(newReadConcern)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withReadConcern(newReadConcern)
 
         then:
         collection.getReadConcern() == newReadConcern
         expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, newReadConcern, JAVA_LEGACY, null, executor))
+                true, true, newReadConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor))
+    }
+
+    def 'should behave correctly when using withTimeout'() {
+        given:
+        def executor = new TestOperationExecutor([])
+        def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+
+        when:
+        def newCollection = collection.withTimeout(10_000, MILLISECONDS)
+
+        then:
+        newCollection.getTimeout(MILLISECONDS) == 10_000
+        expect newCollection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS.withTimeout(10_000, MILLISECONDS), executor))
+
+        when:
+        collection.withTimeout(500, TimeUnit.NANOSECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
     }
 
     def 'should use CountOperation correctly with documentCount'() {
@@ -208,8 +230,9 @@ class MongoCollectionSpecification extends Specification {
         def executor = new TestOperationExecutor([1L, 2L, 3L, 4L])
         def filter = new BsonDocument()
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true,
-                true, readConcern, JAVA_LEGACY, null, executor)
-        def expectedOperation = new CountDocumentsOperation(namespace).filter(filter).retryReads(true)
+                true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+        def expectedOperation = new CountDocumentsOperation(namespace)
+                .filter(filter).retryReads(true)
 
         def countMethod = collection.&countDocuments
 
@@ -232,13 +255,12 @@ class MongoCollectionSpecification extends Specification {
 
         when:
         def hint = new BsonDocument('hint', new BsonInt32(1))
-        execute(countMethod, session, filter, new CountOptions().hint(hint).skip(10).limit(100)
-                .maxTime(100, MILLISECONDS).collation(collation))
+        execute(countMethod, session, filter, new CountOptions().hint(hint).skip(10).limit(100).collation(collation))
         operation = executor.getReadOperation() as CountDocumentsOperation
 
         then:
         executor.getClientSession() == session
-        expect operation, isTheSameAs(expectedOperation.filter(filter).hint(hint).skip(10).limit(100).maxTime(100, MILLISECONDS)
+        expect operation, isTheSameAs(expectedOperation.filter(filter).hint(hint).skip(10).limit(100)
                 .collation(collation))
 
         where:
@@ -249,7 +271,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([1L, 2L])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true,
-                true, readConcern, JAVA_LEGACY, null, executor)
+                true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = new EstimatedDocumentCountOperation(namespace)
                 .retryReads(true)
 
@@ -264,12 +286,13 @@ class MongoCollectionSpecification extends Specification {
         expect operation, isTheSameAs(expectedOperation)
 
         when:
+        expectedOperation = new EstimatedDocumentCountOperation(namespace).retryReads(true)
         execute(countMethod, session, new EstimatedDocumentCountOptions().maxTime(100, MILLISECONDS))
         operation = executor.getReadOperation() as EstimatedDocumentCountOperation
 
         then:
         executor.getClientSession() == session
-        expect operation, isTheSameAs(expectedOperation.maxTime(100, MILLISECONDS))
+        expect operation, isTheSameAs(expectedOperation)
 
         where:
         session << [null]
@@ -279,7 +302,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def filter = new Document('a', 1)
         def distinctMethod = collection.&distinct
 
@@ -288,14 +311,14 @@ class MongoCollectionSpecification extends Specification {
 
         then:
         expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String,
-                codecRegistry, readPreference, readConcern, executor, 'field', new BsonDocument(), true))
+                codecRegistry, readPreference, readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS))
 
         when:
         distinctIterable = execute(distinctMethod, session, 'field', String).filter(filter)
 
         then:
         expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String,
-                codecRegistry, readPreference, readConcern, executor, 'field', filter, true))
+                codecRegistry, readPreference, readConcern, executor, 'field', filter, true, TIMEOUT_SETTINGS))
 
         where:
         session << [null, Stub(ClientSession)]
@@ -305,7 +328,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def findMethod = collection.&find
 
         when:
@@ -313,28 +336,28 @@ class MongoCollectionSpecification extends Specification {
 
         then:
         expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, Document, codecRegistry,
-                readPreference, readConcern, executor, new BsonDocument(), true))
+                readPreference, readConcern, executor, new BsonDocument(), true, TIMEOUT_SETTINGS))
 
         when:
         findIterable = execute(findMethod, session, BsonDocument)
 
         then:
         expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument,
-                codecRegistry, readPreference, readConcern, executor, new BsonDocument(), true))
+                codecRegistry, readPreference, readConcern, executor, new BsonDocument(), true, TIMEOUT_SETTINGS))
 
         when:
         findIterable = execute(findMethod, session, new Document())
 
         then:
         expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, Document,
-                codecRegistry, readPreference, readConcern, executor, new Document(), true))
+                codecRegistry, readPreference, readConcern, executor, new Document(), true, TIMEOUT_SETTINGS))
 
         when:
         findIterable = execute(findMethod, session, new Document(), BsonDocument)
 
         then:
         expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument,
-                codecRegistry, readPreference, readConcern, executor, new Document(), true))
+                codecRegistry, readPreference, readConcern, executor, new Document(), true, TIMEOUT_SETTINGS))
 
         where:
         session << [null, Stub(ClientSession)]
@@ -344,7 +367,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def aggregateMethod = collection.&aggregate
 
         when:
@@ -353,7 +376,7 @@ class MongoCollectionSpecification extends Specification {
         then:
         expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, Document,
                 codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)],
-                AggregationLevel.COLLECTION, true))
+                AggregationLevel.COLLECTION, true, TIMEOUT_SETTINGS))
 
         when:
         aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument)
@@ -361,7 +384,7 @@ class MongoCollectionSpecification extends Specification {
         then:
         expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, BsonDocument,
                 codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)],
-                AggregationLevel.COLLECTION, true))
+                AggregationLevel.COLLECTION, true, TIMEOUT_SETTINGS))
 
         where:
         session << [null, Stub(ClientSession)]
@@ -371,7 +394,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         collection.aggregate(null)
@@ -390,7 +413,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def watchMethod = collection.&watch
 
         when:
@@ -398,7 +421,7 @@ class MongoCollectionSpecification extends Specification {
 
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry,
-                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.COLLECTION, true),
+                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS),
                 ['codec'])
 
         when:
@@ -407,7 +430,7 @@ class MongoCollectionSpecification extends Specification {
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry,
                 readPreference, readConcern, executor, [new Document('$match', 1)], Document,
-                ChangeStreamLevel.COLLECTION, true), ['codec'])
+                ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), ['codec'])
 
         when:
         changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument)
@@ -415,7 +438,7 @@ class MongoCollectionSpecification extends Specification {
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry,
                 readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument,
-                ChangeStreamLevel.COLLECTION, true), ['codec'])
+                ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), ['codec'])
 
         where:
         session << [null, Stub(ClientSession)]
@@ -425,7 +448,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         collection.watch((Class) null)
@@ -444,7 +467,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def mapReduceMethod = collection.&mapReduce
 
         when:
@@ -452,14 +475,14 @@ class MongoCollectionSpecification extends Specification {
 
         then:
         expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, Document,
-                codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce'))
+                codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', TIMEOUT_SETTINGS))
 
         when:
         mapReduceIterable = execute(mapReduceMethod, session, 'map', 'reduce', BsonDocument)
 
         then:
         expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, BsonDocument,
-                codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce'))
+                codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', TIMEOUT_SETTINGS))
 
         where:
         session << [null, Stub(ClientSession)]
@@ -471,7 +494,7 @@ class MongoCollectionSpecification extends Specification {
             writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged()
         })
         def collection = new MongoCollectionImpl(namespace, BsonDocument, codecRegistry, readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassValidation, List<Bson> filters ->
             new MixedBulkWriteOperation(namespace, [
                     new InsertRequest(BsonDocument.parse('{_id: 1}')),
@@ -538,7 +561,7 @@ class MongoCollectionSpecification extends Specification {
         def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()])
         def executor = new TestOperationExecutor([new MongoException('failure')])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         collection.bulkWrite(null)
@@ -565,7 +588,7 @@ class MongoCollectionSpecification extends Specification {
             writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged()
         })
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = { WriteConcern wc, Boolean bypassDocumentValidation ->
             new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))],
                     true, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation)
@@ -610,7 +633,7 @@ class MongoCollectionSpecification extends Specification {
             writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged()
         })
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassDocumentValidation ->
             new MixedBulkWriteOperation(namespace,
                     [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))),
@@ -656,7 +679,7 @@ class MongoCollectionSpecification extends Specification {
     def 'should validate the insertMany data correctly'() {
         given:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor))
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor))
 
         when:
         collection.insertMany(null)
@@ -678,7 +701,7 @@ class MongoCollectionSpecification extends Specification {
         })
         def expectedResult = writeConcern.isAcknowledged() ? DeleteResult.acknowledged(1) : DeleteResult.unacknowledged()
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry,  readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def deleteOneMethod = collection.&deleteOne
 
         when:
@@ -720,7 +743,7 @@ class MongoCollectionSpecification extends Specification {
 
         def executor = new TestOperationExecutor([bulkWriteException])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         collection.deleteOne(new Document('_id', 1))
@@ -741,7 +764,7 @@ class MongoCollectionSpecification extends Specification {
         })
         def expectedResult = writeConcern.isAcknowledged() ? DeleteResult.acknowledged(1) : DeleteResult.unacknowledged()
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry,  readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def deleteManyMethod = collection.&deleteMany
 
         when:
@@ -785,7 +808,7 @@ class MongoCollectionSpecification extends Specification {
         def expectedResult = writeConcern.isAcknowledged() ?
                 UpdateResult.acknowledged(1, modifiedCount, upsertedId) : UpdateResult.unacknowledged()
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry,  readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassValidation, Collation collation ->
             new MixedBulkWriteOperation(namespace,
@@ -827,7 +850,7 @@ class MongoCollectionSpecification extends Specification {
 
         def executor = new TestOperationExecutor([bulkWriteException])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         collection.replaceOne(new Document('_id', 1), new Document('_id', 1))
@@ -855,7 +878,7 @@ class MongoCollectionSpecification extends Specification {
         })
         def expectedResult = writeConcern.isAcknowledged() ? UpdateResult.acknowledged(1, 0, null) : UpdateResult.unacknowledged()
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation,
                                   List<Bson> filters, BsonDocument hintDoc, String hintStr ->
             new MixedBulkWriteOperation(namespace,
@@ -904,7 +927,7 @@ class MongoCollectionSpecification extends Specification {
         })
         def expectedResult = writeConcern.isAcknowledged() ? UpdateResult.acknowledged(5, 3, null) : UpdateResult.unacknowledged()
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation,
                                   List<Bson> filters, BsonDocument hintDoc, String hintStr ->
             new MixedBulkWriteOperation(namespace,
@@ -948,7 +971,7 @@ class MongoCollectionSpecification extends Specification {
     def 'should translate MongoBulkWriteException to MongoWriteException'() {
         given:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         collection.insertOne(new Document('_id', 1))
@@ -970,7 +993,7 @@ class MongoCollectionSpecification extends Specification {
                 new WriteConcernError(42, 'codeName', 'Message', new BsonDocument()),
                 new ServerAddress(), [] as Set)])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         collection.insertOne(new Document('_id', 1))
@@ -986,8 +1009,9 @@ class MongoCollectionSpecification extends Specification {
             writeConcern.isAcknowledged() ? WriteConcernResult.acknowledged(1, true, null) : unacknowledged()
         })
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry,  readPreference, ACKNOWLEDGED,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
-        def expectedOperation = new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites,  new DocumentCodec())
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+        def expectedOperation = new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites,
+                new DocumentCodec())
                 .filter(new BsonDocument('a', new BsonInt32(1)))
         def findOneAndDeleteMethod = collection.&findOneAndDelete
 
@@ -999,14 +1023,20 @@ class MongoCollectionSpecification extends Specification {
         expect operation, isTheSameAs(expectedOperation)
 
         when:
+        expectedOperation =
+                new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec())
+                        .filter(new BsonDocument('a', new BsonInt32(1)))
+                        .projection(new BsonDocument('projection', new BsonInt32(1)))
+                        .collation(collation)
         execute(findOneAndDeleteMethod, session, new Document('a', 1),
-                new FindOneAndDeleteOptions().projection(new Document('projection', 1))
-                        .maxTime(100, MILLISECONDS).collation(collation))
+                new FindOneAndDeleteOptions()
+                        .projection(new Document('projection', 1))
+                        .maxTime(100, MILLISECONDS)
+                        .collation(collation))
         operation = executor.getWriteOperation() as FindAndDeleteOperation
 
         then:
-        expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1)))
-                .maxTime(100, MILLISECONDS).collation(collation))
+        expect operation, isTheSameAs(expectedOperation)
 
         where:
         [writeConcern, session, retryWrites] << [
@@ -1022,9 +1052,10 @@ class MongoCollectionSpecification extends Specification {
             writeConcern.isAcknowledged() ? WriteConcernResult.acknowledged(1, true, null) : WriteConcernResult.unacknowledged()
         })
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry,  readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
-        def expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, retryWrites, new DocumentCodec(),
-                new BsonDocument('a', new BsonInt32(10))).filter(new BsonDocument('a', new BsonInt32(1)))
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+        def expectedOperation = new FindAndReplaceOperation(namespace, writeConcern,
+                retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10)))
+                .filter(new BsonDocument('a', new BsonInt32(1)))
         def findOneAndReplaceMethod = collection.&findOneAndReplace
 
         when:
@@ -1035,24 +1066,22 @@ class MongoCollectionSpecification extends Specification {
         expect operation, isTheSameAs(expectedOperation)
 
         when:
+        expectedOperation = new FindAndReplaceOperation(namespace, writeConcern,
+                retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10)))
+                .filter(new BsonDocument('a', new BsonInt32(1)))
+                .projection(new BsonDocument('projection', new BsonInt32(1)))
+                .bypassDocumentValidation(false)
+                .collation(collation)
         execute(findOneAndReplaceMethod, session, new Document('a', 1), new Document('a', 10),
-                new FindOneAndReplaceOptions().projection(new Document('projection', 1))
-                        .maxTime(100, MILLISECONDS).bypassDocumentValidation(false))
-        operation = executor.getWriteOperation() as FindAndReplaceOperation
-
-        then:
-        expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1)))
-                .maxTime(100, MILLISECONDS).bypassDocumentValidation(false))
-
-        when:
-        execute(findOneAndReplaceMethod, session, new Document('a', 1), new Document('a', 10),
-                new FindOneAndReplaceOptions().projection(new Document('projection', 1))
-                        .maxTime(100, MILLISECONDS).bypassDocumentValidation(true).collation(collation))
+                new FindOneAndReplaceOptions()
+                        .projection(new Document('projection', 1))
+                        .maxTime(100, MILLISECONDS)
+                        .bypassDocumentValidation(false)
+                        .collation(collation))
         operation = executor.getWriteOperation() as FindAndReplaceOperation
 
         then:
-        expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1)))
-                .maxTime(100, MILLISECONDS).bypassDocumentValidation(true).collation(collation))
+        expect operation, isTheSameAs(expectedOperation)
 
         where:
         [writeConcern, session, retryWrites] << [
@@ -1068,9 +1097,10 @@ class MongoCollectionSpecification extends Specification {
             writeConcern.isAcknowledged() ? WriteConcernResult.acknowledged(1, true, null) : unacknowledged()
         })
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry,  readPreference, writeConcern,
-                retryWrites, true, readConcern, JAVA_LEGACY, null, executor)
-        def expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, new DocumentCodec(),
-                new BsonDocument('a', new BsonInt32(10))).filter(new BsonDocument('a', new BsonInt32(1)))
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+        def expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites,
+                new DocumentCodec(), new BsonDocument('a', new BsonInt32(10)))
+                .filter(new BsonDocument('a', new BsonInt32(1)))
         def findOneAndUpdateMethod = collection.&findOneAndUpdate
 
         when:
@@ -1081,15 +1111,25 @@ class MongoCollectionSpecification extends Specification {
         expect operation, isTheSameAs(expectedOperation)
 
         when:
+        expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites,
+                new DocumentCodec(), new BsonDocument('a', new BsonInt32(10)))
+                .filter(new BsonDocument('a', new BsonInt32(1)))
+                .projection(new BsonDocument('projection', new BsonInt32(1)))
+                .bypassDocumentValidation(bypassDocumentValidation)
+                .collation(collation)
+                .arrayFilters(arrayFilters)
+
         execute(findOneAndUpdateMethod, session, new Document('a', 1), new Document('a', 10),
-                new FindOneAndUpdateOptions().projection(new Document('projection', 1)).maxTime(100, MILLISECONDS)
-                        .bypassDocumentValidation(bypassDocumentValidation).collation(collation).arrayFilters(arrayFilters))
+                new FindOneAndUpdateOptions()
+                        .projection(new Document('projection', 1))
+                        .maxTime(100, MILLISECONDS)
+                        .bypassDocumentValidation(bypassDocumentValidation)
+                        .collation(collation)
+                        .arrayFilters(arrayFilters))
         operation = executor.getWriteOperation() as FindAndUpdateOperation
 
         then:
-        expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1)))
-                .maxTime(100, MILLISECONDS).bypassDocumentValidation(bypassDocumentValidation).collation(collation)
-                .arrayFilters(arrayFilters))
+        expect operation, isTheSameAs(expectedOperation)
 
         where:
         [writeConcern, arrayFilters, bypassDocumentValidation, session, retryWrites] << [
@@ -1105,7 +1145,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = new DropCollectionOperation(namespace, ACKNOWLEDGED)
         def dropMethod = collection.&drop
 
@@ -1125,7 +1165,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null, null, null, null])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def createIndexMethod = collection.&createIndex
         def createIndexesMethod = collection.&createIndexes
 
@@ -1153,10 +1193,12 @@ class MongoCollectionSpecification extends Specification {
         indexNames == ['key_1', 'key1_1']
 
         when:
-        expectedOperation = expectedOperation.maxTime(10, MILLISECONDS)
+        expectedOperation = new CreateIndexesOperation(namespace,
+                [new IndexRequest(new BsonDocument('key', new BsonInt32(1))),
+                 new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED)
         indexNames = execute(createIndexesMethod, session,
                 [new IndexModel(new Document('key', 1)), new IndexModel(new Document('key1', 1))],
-                new CreateIndexOptions().maxTime(10, MILLISECONDS))
+                new CreateIndexOptions().maxTime(100, MILLISECONDS))
         operation = executor.getWriteOperation() as CreateIndexesOperation
 
         then:
@@ -1236,7 +1278,7 @@ class MongoCollectionSpecification extends Specification {
     def 'should validate the createIndexes data correctly'() {
         given:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor))
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor))
 
         when:
         collection.createIndexes(null)
@@ -1256,7 +1298,7 @@ class MongoCollectionSpecification extends Specification {
         def batchCursor = Stub(BatchCursor)
         def executor = new TestOperationExecutor([batchCursor, batchCursor, batchCursor])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def listIndexesMethod = collection.&listIndexes
 
         when:
@@ -1277,12 +1319,12 @@ class MongoCollectionSpecification extends Specification {
         executor.getClientSession() == session
 
         when:
-        execute(listIndexesMethod, session).batchSize(10).maxTime(10, MILLISECONDS).iterator()
+        execute(listIndexesMethod, session).batchSize(10).maxTime(100, MILLISECONDS).iterator()
         operation = executor.getReadOperation() as ListIndexesOperation
 
         then:
         expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).batchSize(10)
-                .maxTime(10, MILLISECONDS).retryReads(true))
+                .retryReads(true))
         executor.getClientSession() == session
 
         where:
@@ -1293,7 +1335,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null, null])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def dropIndexMethod = collection.&dropIndex
 
         when:
@@ -1316,8 +1358,8 @@ class MongoCollectionSpecification extends Specification {
         executor.getClientSession() == session
 
         when:
-        expectedOperation = expectedOperation.maxTime(10, MILLISECONDS)
-        execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(10, MILLISECONDS))
+        expectedOperation = new DropIndexOperation(namespace, keys, ACKNOWLEDGED)
+        execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(100, MILLISECONDS))
         operation = executor.getWriteOperation() as DropIndexOperation
 
         then:
@@ -1332,7 +1374,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED)
         def dropIndexesMethod = collection.&dropIndexes
 
@@ -1345,8 +1387,8 @@ class MongoCollectionSpecification extends Specification {
         executor.getClientSession() == session
 
         when:
-        expectedOperation = expectedOperation.maxTime(10, MILLISECONDS)
-        execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(10, MILLISECONDS))
+        expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED)
+        execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(100, MILLISECONDS))
         operation = executor.getWriteOperation() as DropIndexOperation
 
         then:
@@ -1361,7 +1403,7 @@ class MongoCollectionSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null])
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def newNamespace = new MongoNamespace(namespace.getDatabaseName(), 'newName')
         def renameCollectionOptions = new RenameCollectionOptions().dropTarget(dropTarget)
         def expectedOperation = new RenameCollectionOperation(namespace, newNamespace, ACKNOWLEDGED)
@@ -1392,7 +1434,7 @@ class MongoCollectionSpecification extends Specification {
         def executor = new TestOperationExecutor([acknowledged(INSERT, 1, 0, [], [])])
         def customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry)
         def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def document = new ImmutableDocument(['a': 1])
 
         when:
@@ -1414,7 +1456,7 @@ class MongoCollectionSpecification extends Specification {
         def executor = new TestOperationExecutor([null])
         def customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry)
         def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, executor)
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def document = new ImmutableDocument(['a': 1])
 
         when:
@@ -1434,7 +1476,8 @@ class MongoCollectionSpecification extends Specification {
     def 'should validate the client session correctly'() {
         given:
         def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
-                true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor))
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS,
+                Stub(OperationExecutor))
 
         when:
         collection.aggregate(null, [Document.parse('{$match:{}}')])
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy
index 81cbad9f34f..e702dd5e276 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy
@@ -44,6 +44,9 @@ import org.bson.codecs.UuidCodec
 import org.bson.codecs.ValueCodecProvider
 import spock.lang.Specification
 
+import java.util.concurrent.TimeUnit
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
 import static com.mongodb.CustomMatchers.isTheSameAs
 import static com.mongodb.ReadPreference.primary
 import static com.mongodb.ReadPreference.primaryPreferred
@@ -66,7 +69,7 @@ class MongoDatabaseSpecification extends Specification {
     def 'should throw IllegalArgumentException if name is invalid'() {
         when:
         new MongoDatabaseImpl('a.b', codecRegistry, readPreference, writeConcern, false, false, readConcern,
-                JAVA_LEGACY, null, new TestOperationExecutor([]))
+                JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([]))
 
         then:
         thrown(IllegalArgumentException)
@@ -75,7 +78,7 @@ class MongoDatabaseSpecification extends Specification {
     def 'should throw IllegalArgumentException from getCollection if collectionName is invalid'() {
         given:
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern,
-                JAVA_LEGACY, null, new TestOperationExecutor([]))
+                JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([]))
 
         when:
         database.getCollection('')
@@ -87,7 +90,7 @@ class MongoDatabaseSpecification extends Specification {
     def 'should return the correct name from getName'() {
         given:
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern,
-                JAVA_LEGACY, null, new TestOperationExecutor([]))
+                JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([]))
 
         expect:
         database.getName() == name
@@ -100,13 +103,13 @@ class MongoDatabaseSpecification extends Specification {
 
         when:
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, true, readConcern,
-                C_SHARP_LEGACY, null, executor)
+                C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor)
                 .withCodecRegistry(newCodecRegistry)
 
         then:
         (database.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY
         expect database, isTheSameAs(new MongoDatabaseImpl(name, database.getCodecRegistry(), readPreference, writeConcern,
-                false, true, readConcern, C_SHARP_LEGACY, null, executor))
+                false, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor))
     }
 
     def 'should behave correctly when using withReadPreference'() {
@@ -116,13 +119,13 @@ class MongoDatabaseSpecification extends Specification {
 
         when:
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
                 .withReadPreference(newReadPreference)
 
         then:
         database.getReadPreference() == newReadPreference
         expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, newReadPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor))
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor))
     }
 
     def 'should behave correctly when using withWriteConcern'() {
@@ -132,13 +135,13 @@ class MongoDatabaseSpecification extends Specification {
 
         when:
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
                 .withWriteConcern(newWriteConcern)
 
         then:
         database.getWriteConcern() == newWriteConcern
         expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, newWriteConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor))
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor))
     }
 
     def 'should behave correctly when using withReadConcern'() {
@@ -148,13 +151,34 @@ class MongoDatabaseSpecification extends Specification {
 
         when:
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
                 .withReadConcern(newReadConcern)
 
         then:
         database.getReadConcern() == newReadConcern
         expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                newReadConcern, JAVA_LEGACY, null, executor))
+                newReadConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor))
+    }
+
+    def 'should behave correctly when using withTimeout'() {
+        given:
+        def executor = new TestOperationExecutor([])
+        def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+
+        when:
+        def newDatabase = database.withTimeout(10_000, TimeUnit.MILLISECONDS)
+
+        then:
+        newDatabase.getTimeout(TimeUnit.MILLISECONDS) == 10_000
+        expect newDatabase, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
+                readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS.withTimeout(10_000, TimeUnit.MILLISECONDS), executor))
+
+        when:
+        database.withTimeout(500, TimeUnit.NANOSECONDS)
+
+        then:
+        thrown(IllegalArgumentException)
     }
 
     def 'should be able to executeCommand correctly'() {
@@ -162,42 +186,38 @@ class MongoDatabaseSpecification extends Specification {
         def command = new BsonDocument('command', new BsonInt32(1))
         def executor = new TestOperationExecutor([null, null, null, null])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def runCommandMethod = database.&runCommand
 
         when:
         execute(runCommandMethod, session, command)
-        def operation = executor.getReadOperation() as CommandReadOperation<Document>
+        executor.getReadOperation() as CommandReadOperation<Document>
 
         then:
-        operation.command == command
         executor.getClientSession() == session
         executor.getReadPreference() == primary()
 
         when:
         execute(runCommandMethod, session, command, primaryPreferred())
-        operation = executor.getReadOperation() as CommandReadOperation<Document>
+        executor.getReadOperation() as CommandReadOperation<Document>
 
         then:
-        operation.command == command
         executor.getClientSession() == session
         executor.getReadPreference() == primaryPreferred()
 
         when:
         execute(runCommandMethod, session, command, BsonDocument)
-        operation = executor.getReadOperation() as CommandReadOperation<BsonDocument>
+        executor.getReadOperation() as CommandReadOperation<BsonDocument>
 
         then:
-        operation.command == command
         executor.getClientSession() == session
         executor.getReadPreference() == primary()
 
         when:
         execute(runCommandMethod, session, command, primaryPreferred(), BsonDocument)
-        operation = executor.getReadOperation() as CommandReadOperation<BsonDocument>
+        executor.getReadOperation() as CommandReadOperation<BsonDocument>
 
         then:
-        operation.command == command
         executor.getClientSession() == session
         executor.getReadPreference() == primaryPreferred()
 
@@ -209,7 +229,7 @@ class MongoDatabaseSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def dropMethod = database.&drop
 
         when:
@@ -228,7 +248,7 @@ class MongoDatabaseSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([null, null, null])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def listCollectionsMethod = database.&listCollections
         def listCollectionNamesMethod = database.&listCollectionNames
 
@@ -237,14 +257,14 @@ class MongoDatabaseSpecification extends Specification {
 
         then:
         expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false,
-                Document, codecRegistry, primary(), executor, false))
+                Document, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS))
 
         when:
         listCollectionIterable = execute(listCollectionsMethod, session, BsonDocument)
 
         then:
         expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false,
-                BsonDocument, codecRegistry, primary(), executor, false))
+                BsonDocument, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS))
 
         when:
         def listCollectionNamesIterable = execute(listCollectionNamesMethod, session)
@@ -252,7 +272,7 @@ class MongoDatabaseSpecification extends Specification {
         then:
         // `listCollectionNamesIterable` is an instance of a `ListCollectionNamesIterableImpl`, so have to get the wrapped iterable from it
         expect listCollectionNamesIterable.getWrapped(), isTheSameAs(new ListCollectionsIterableImpl<>(session, name,
-                true, BsonDocument, codecRegistry, primary(), executor, false))
+                true, BsonDocument, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS))
 
         where:
         session << [null, Stub(ClientSession)]
@@ -263,7 +283,7 @@ class MongoDatabaseSpecification extends Specification {
         def collectionName = 'collectionName'
         def executor = new TestOperationExecutor([null, null])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def createCollectionMethod = database.&createCollection
 
         when:
@@ -314,7 +334,7 @@ class MongoDatabaseSpecification extends Specification {
         def writeConcern = WriteConcern.JOURNALED
         def executor = new TestOperationExecutor([null, null])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def createViewMethod = database.&createView
 
         when:
@@ -344,7 +364,7 @@ class MongoDatabaseSpecification extends Specification {
         def viewName = 'view1'
         def viewOn = 'col1'
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern, JAVA_LEGACY, null, Stub(OperationExecutor))
+                readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor))
 
         when:
         database.createView(viewName, viewOn, null)
@@ -364,7 +384,7 @@ class MongoDatabaseSpecification extends Specification {
         def executor = new TestOperationExecutor([])
         def namespace = new MongoNamespace(name, 'ignored')
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def watchMethod = database.&watch
 
         when:
@@ -372,7 +392,7 @@ class MongoDatabaseSpecification extends Specification {
 
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry,
-                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.DATABASE, false),
+                readPreference, readConcern, executor, [], Document, ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS),
                 ['codec'])
 
         when:
@@ -381,7 +401,7 @@ class MongoDatabaseSpecification extends Specification {
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry,
                 readPreference, readConcern, executor, [new Document('$match', 1)], Document,
-                ChangeStreamLevel.DATABASE, false), ['codec'])
+                ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec'])
 
         when:
         changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument)
@@ -389,7 +409,7 @@ class MongoDatabaseSpecification extends Specification {
         then:
         expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry,
                 readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument,
-                ChangeStreamLevel.DATABASE, false), ['codec'])
+                ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec'])
 
         where:
         session << [null, Stub(ClientSession)]
@@ -399,7 +419,7 @@ class MongoDatabaseSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         database.watch((Class) null)
@@ -418,7 +438,7 @@ class MongoDatabaseSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
         def aggregateMethod = database.&aggregate
 
         when:
@@ -427,7 +447,7 @@ class MongoDatabaseSpecification extends Specification {
         then:
         expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document,
                 codecRegistry, readPreference, readConcern, writeConcern, executor, [], AggregationLevel.DATABASE,
-                false), ['codec'])
+                false, TIMEOUT_SETTINGS), ['codec'])
 
         when:
         aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)])
@@ -435,7 +455,7 @@ class MongoDatabaseSpecification extends Specification {
         then:
         expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document,
                 codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)],
-                AggregationLevel.DATABASE, false), ['codec'])
+                AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec'])
 
         when:
         aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument)
@@ -443,7 +463,7 @@ class MongoDatabaseSpecification extends Specification {
         then:
         expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, BsonDocument,
                 codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)],
-                AggregationLevel.DATABASE, false), ['codec'])
+                AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec'])
 
         where:
         session << [null, Stub(ClientSession)]
@@ -453,7 +473,7 @@ class MongoDatabaseSpecification extends Specification {
         given:
         def executor = new TestOperationExecutor([])
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false,
-                readConcern,  JAVA_LEGACY, null, executor)
+                readConcern,  JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
 
         when:
         database.aggregate(null, [])
@@ -478,7 +498,7 @@ class MongoDatabaseSpecification extends Specification {
         given:
         def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()])
         def database = new MongoDatabaseImpl('databaseName', codecRegistry, secondary(), WriteConcern.MAJORITY, true, true,
-                ReadConcern.MAJORITY, JAVA_LEGACY, null, new TestOperationExecutor([]))
+                ReadConcern.MAJORITY, JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([]))
 
         when:
         def collection = database.getCollection('collectionName')
@@ -489,14 +509,14 @@ class MongoDatabaseSpecification extends Specification {
         where:
         expectedCollection = new MongoCollectionImpl<Document>(new MongoNamespace('databaseName', 'collectionName'), Document,
                 fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]), secondary(),
-                WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, JAVA_LEGACY, null,
+                WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, JAVA_LEGACY, null, TIMEOUT_SETTINGS,
                 new TestOperationExecutor([]))
     }
 
     def 'should validate the client session correctly'() {
         given:
         def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false,
-                false, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor))
+                false, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor))
 
         when:
         database.createCollection(null, 'newColl')
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java
index a605d6542e7..28206e1be26 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java
@@ -19,6 +19,7 @@
 import com.mongodb.ReadConcern;
 import com.mongodb.ReadPreference;
 import com.mongodb.client.ClientSession;
+import com.mongodb.internal.TimeoutSettings;
 import com.mongodb.internal.operation.ReadOperation;
 import com.mongodb.internal.operation.WriteOperation;
 import com.mongodb.lang.Nullable;
@@ -68,6 +69,16 @@ public <T> T execute(final WriteOperation<T> operation, final ReadConcern readCo
         return getResponse();
     }
 
+    @Override
+    public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) {
+        return this;
+    }
+
+    @Override
+    public TimeoutSettings getTimeoutSettings() {
+        throw new UnsupportedOperationException("Not supported");
+    }
+
     @SuppressWarnings("unchecked")
     private <T> T getResponse() {
         Object response = responses.remove(0);
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java b/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java
new file mode 100644
index 00000000000..c3569624414
--- /dev/null
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.internal;
+
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.internal.time.Timeout;
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout;
+import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout;
+import static com.mongodb.internal.mockito.MongoMockito.mock;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.longThat;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoInteractions;
+import static org.mockito.Mockito.when;
+
+@SuppressWarnings("unchecked")
+class TimeoutHelperTest {
+
+    private static final String TIMEOUT_ERROR_MESSAGE = "message";
+
+    @Test
+    void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsNull() {
+        //given
+        MongoCollection<Document> collection = mock(MongoCollection.class);
+
+        //when
+        MongoCollection<Document> result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, null);
+
+        //then
+        assertEquals(collection, result);
+    }
+
+    @Test
+    void shouldNotSetRemainingTimeoutOnDatabaseWhenTimeoutIsNull() {
+        //given
+        MongoDatabase database = mock(MongoDatabase.class);
+
+        //when
+        MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, null);
+
+        //then
+        assertEquals(database, result);
+    }
+
+    @Test
+    void shouldSetRemainingTimeoutOnCollectionWhenTimeoutIsInfinite() {
+        //given
+        MongoCollection<Document> collectionWithTimeout = mock(MongoCollection.class);
+        MongoCollection<Document> collection = mock(MongoCollection.class, mongoCollection -> {
+            when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout);
+        });
+
+        //when
+        MongoCollection<Document> result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, Timeout.infinite());
+
+        //then
+        assertEquals(collectionWithTimeout, result);
+        verify(collection).withTimeout(0L, TimeUnit.MILLISECONDS);
+    }
+
+    @Test
+    void shouldSetRemainingTimeoutOnDatabaseWhenTimeoutIsInfinite() {
+        //given
+        MongoDatabase databaseWithTimeout = mock(MongoDatabase.class);
+        MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> {
+            when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout);
+        });
+
+        //when
+        MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite());
+
+        //then
+        assertEquals(databaseWithTimeout, result);
+        verify(database).withTimeout(0L, TimeUnit.MILLISECONDS);
+    }
+
+    @Test
+    void shouldSetRemainingTimeoutOnCollectionWhenTimeout() {
+        //given
+        MongoCollection<Document> collectionWithTimeout = mock(MongoCollection.class);
+        MongoCollection<Document> collection = mock(MongoCollection.class, mongoCollection -> {
+            when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout);
+        });
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoCollection<Document> result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout);
+
+        //then
+        verify(collection).withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS));
+        assertEquals(collectionWithTimeout, result);
+    }
+
+    @Test
+    void shouldSetRemainingTimeoutOnDatabaseWhenTimeout() {
+        //given
+        MongoDatabase databaseWithTimeout = mock(MongoDatabase.class);
+        MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> {
+            when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout);
+        });
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout);
+
+        //then
+        verify(database).withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS));
+        assertEquals(databaseWithTimeout, result);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredOnCollection() {
+        //given
+        MongoCollection<Document> collection = mock(MongoCollection.class);
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoOperationTimeoutException mongoOperationTimeoutException =
+                assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout));
+
+        //then
+        assertEquals(TIMEOUT_ERROR_MESSAGE, mongoOperationTimeoutException.getMessage());
+        verifyNoInteractions(collection);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredOnDatabase() {
+        //given
+        MongoDatabase database = mock(MongoDatabase.class);
+        Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        MongoOperationTimeoutException mongoOperationTimeoutException =
+                assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout));
+
+        //then
+        assertEquals(TIMEOUT_ERROR_MESSAGE, mongoOperationTimeoutException.getMessage());
+        verifyNoInteractions(database);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnCollection() {
+        //given
+        MongoCollection<Document> collection = mock(MongoCollection.class);
+        Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout));
+
+        //then
+        verifyNoInteractions(collection);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnDatabase() {
+        //given
+        MongoDatabase database = mock(MongoDatabase.class);
+        Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout));
+
+        //then
+        verifyNoInteractions(database);
+    }
+
+}