diff --git a/build.gradle b/build.gradle index 10add6b07ab..6a5dd7517d0 100644 --- a/build.gradle +++ b/build.gradle @@ -157,6 +157,7 @@ configure(scalaProjects) { "-unchecked", "-language:reflectiveCalls", "-Wconf:cat=deprecation:ws,any:e", + "-Wconf:msg=While parsing annotations in:silent", "-Xlint:strict-unsealed-patmat" ] } diff --git a/driver-core/src/main/com/mongodb/internal/ClientSideOperationTimeout.java b/driver-core/src/main/com/mongodb/internal/ClientSideOperationTimeout.java new file mode 100644 index 00000000000..add7074131d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/ClientSideOperationTimeout.java @@ -0,0 +1,158 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.lang.Nullable; + +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * Client Side Operation Timeout. + * + *
<p>
Includes support for the deprecated {@code maxTimeMS} and {@code maxCommitTimeMS} operation configurations
</p>
+ */ +public class ClientSideOperationTimeout { + + private final Long timeoutMS; + private final long maxAwaitTimeMS; + + // Deprecated operation based operation timeouts + private final long maxTimeMS; + private final long maxCommitTimeMS; + + @Nullable + private Timeout timeout; + + public ClientSideOperationTimeout(@Nullable final Long timeoutMS, + final long maxAwaitTimeMS, + final long maxTimeMS, + final long maxCommitTimeMS) { + isTrueArgument("timeoutMS must be >= 0", timeoutMS == null || timeoutMS >= 0); + this.timeoutMS = timeoutMS; + this.maxAwaitTimeMS = maxAwaitTimeMS; + this.maxTimeMS = timeoutMS == null ? maxTimeMS : 0; + this.maxCommitTimeMS = timeoutMS == null ? maxCommitTimeMS : 0; + + if (timeoutMS != null) { + if (timeoutMS == 0) { + this.timeout = Timeout.infinite(); + } else { + this.timeout = Timeout.startNow(timeoutMS, MILLISECONDS); + } + } + } + + /** + * Allows for the differentiation between users explicitly setting a global operation timeout via {@code timeoutMS}. + * + * @return true if a timeout has been set. + */ + public boolean hasTimeoutMS() { + return timeoutMS != null; + } + + /** + * Checks the expiry of the timeout. + * + * @return true if the timeout has been set and it has expired + */ + public boolean expired() { + return timeout != null && timeout.expired(); + } + + /** + * Returns the remaining {@code timeoutMS} if set or the {@code alternativeTimeoutMS}. + * + * @param alternativeTimeoutMS the alternative timeout. + * @return timeout to use. + */ + public long timeoutOrAlternative(final long alternativeTimeoutMS) { + if (timeoutMS == null) { + return alternativeTimeoutMS; + } else if (timeoutMS == 0) { + return timeoutMS; + } else { + return timeoutRemainingMS(); + } + } + + /** + * Calculates the minimum timeout value between two possible timeouts. + * + * @param alternativeTimeoutMS the alternative timeout + * @return the minimum value to use. 
+ */ + public long calculateMin(final long alternativeTimeoutMS) { + if (timeoutMS == null) { + return alternativeTimeoutMS; + } else if (timeoutMS == 0) { + return alternativeTimeoutMS; + } else if (alternativeTimeoutMS == 0) { + return timeoutRemainingMS(); + } else { + return Math.min(timeoutRemainingMS(), alternativeTimeoutMS); + } + } + + public long getMaxAwaitTimeMS() { + return maxAwaitTimeMS; + } + + public long getMaxTimeMS() { + return timeoutOrAlternative(maxTimeMS); + } + + public long getMaxCommitTimeMS() { + return timeoutOrAlternative(maxCommitTimeMS); + } + + private long timeoutRemainingMS() { + assertNotNull(timeout); + return timeout.isInfinite() ? 0 : timeout.remaining(MILLISECONDS); + } + + @Override + public String toString() { + return "ClientSideOperationTimeout{" + + "timeoutMS=" + timeoutMS + + ", maxAwaitTimeMS=" + maxAwaitTimeMS + + ", maxTimeMS=" + maxTimeMS + + ", maxCommitTimeMS=" + maxCommitTimeMS + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ClientSideOperationTimeout that = (ClientSideOperationTimeout) o; + return maxAwaitTimeMS == that.maxAwaitTimeMS && maxTimeMS == that.maxTimeMS && maxCommitTimeMS == that.maxCommitTimeMS + && Objects.equals(timeoutMS, that.timeoutMS); + } + + @Override + public int hashCode() { + return Objects.hash(timeoutMS, maxAwaitTimeMS, maxTimeMS, maxCommitTimeMS); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/ClientSideOperationTimeouts.java b/driver-core/src/main/com/mongodb/internal/ClientSideOperationTimeouts.java new file mode 100644 index 00000000000..c0c655abcff --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/ClientSideOperationTimeouts.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.lang.Nullable; + +/** + * A factory for creating {@link ClientSideOperationTimeout} instances + */ +public final class ClientSideOperationTimeouts { + + public static final ClientSideOperationTimeout NO_TIMEOUT = create(null, 0, 0, 0); + + public static ClientSideOperationTimeout create(@Nullable final Long timeoutMS) { + return create(timeoutMS, 0); + } + + public static ClientSideOperationTimeout create(@Nullable final Long timeoutMS, final long maxTimeMS) { + return create(timeoutMS, maxTimeMS, 0); + } + + public static ClientSideOperationTimeout create(@Nullable final Long timeoutMS, + final long maxTimeMS, + final long maxAwaitTimeMS) { + return new ClientSideOperationTimeout(timeoutMS, maxAwaitTimeMS, maxTimeMS, 0); + } + + public static ClientSideOperationTimeout create(@Nullable final Long timeoutMS, + final long maxTimeMS, + final long maxAwaitTimeMS, + final long maxCommitMS) { + return new ClientSideOperationTimeout(timeoutMS, maxAwaitTimeMS, maxTimeMS, maxCommitMS); + } + + public static ClientSideOperationTimeout withMaxCommitMS(@Nullable final Long timeoutMS, + @Nullable final Long maxCommitMS) { + return create(timeoutMS, 0, 0, maxCommitMS != null ? 
maxCommitMS : 0); + } + + private ClientSideOperationTimeouts() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java index 13166eb53ab..9dd63007b74 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java @@ -18,6 +18,7 @@ import com.mongodb.Function; import com.mongodb.WriteConcern; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; @@ -31,8 +32,8 @@ public class AbortTransactionOperation extends TransactionOperation { private BsonDocument recoveryToken; - public AbortTransactionOperation(final WriteConcern writeConcern) { - super(writeConcern); + public AbortTransactionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final WriteConcern writeConcern) { + super(clientSideOperationTimeout, writeConcern); } public AbortTransactionOperation recoveryToken(@Nullable final BsonDocument recoveryToken) { @@ -49,7 +50,9 @@ protected String getCommandName() { CommandCreator getCommandCreator() { CommandCreator creator = super.getCommandCreator(); if (recoveryToken != null) { - return (serverDescription, connectionDescription) -> creator.create(serverDescription, connectionDescription).append("recoveryToken", recoveryToken); + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> + creator.create(clientSideOperationTimeout, serverDescription, connectionDescription) + .append("recoveryToken", recoveryToken); } return creator; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java index 857c14b857c..5af8841f343 100644 --- 
a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java @@ -19,6 +19,7 @@ import com.mongodb.ExplainVerbosity; import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -31,7 +32,6 @@ import org.bson.codecs.Decoder; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION; @@ -44,13 +44,14 @@ public class AggregateOperation implements AsyncExplainableReadOperation>, ExplainableReadOperation> { private final AggregateOperationImpl wrapped; - public AggregateOperation(final MongoNamespace namespace, final List pipeline, final Decoder decoder) { - this(namespace, pipeline, decoder, AggregationLevel.COLLECTION); + public AggregateOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, final Decoder decoder) { + this(clientSideOperationTimeout, namespace, pipeline, decoder, AggregationLevel.COLLECTION); } - public AggregateOperation(final MongoNamespace namespace, final List pipeline, final Decoder decoder, - final AggregationLevel aggregationLevel) { - this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, decoder, aggregationLevel); + public AggregateOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, final Decoder decoder, final AggregationLevel aggregationLevel) { + this.wrapped = new AggregateOperationImpl<>(clientSideOperationTimeout, namespace, pipeline, decoder, aggregationLevel); } public List getPipeline() { 
@@ -75,24 +76,6 @@ public AggregateOperation batchSize(@Nullable final Integer batchSize) { return this; } - public long getMaxAwaitTime(final TimeUnit timeUnit) { - return wrapped.getMaxAwaitTime(timeUnit); - } - - public AggregateOperation maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - wrapped.maxAwaitTime(maxAwaitTime, timeUnit); - return this; - } - - public long getMaxTime(final TimeUnit timeUnit) { - return wrapped.getMaxTime(timeUnit); - } - - public AggregateOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - wrapped.maxTime(maxTime, timeUnit); - return this; - } - public Collation getCollation() { return wrapped.getCollation(); } @@ -159,24 +142,20 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb } public ReadOperation asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(wrapped.getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), - resultDecoder); + return new CommandReadOperation<>(wrapped.getClientSideOperationTimeout(), getNamespace().getDatabaseName(), + asExplainCommand(wrapped.getCommand(wrapped.getClientSideOperationTimeout(), NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), + verbosity), resultDecoder); } public AsyncReadOperation asAsyncExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(wrapped.getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), - resultDecoder); + return new CommandReadOperation<>(wrapped.getClientSideOperationTimeout(), getNamespace().getDatabaseName(), + asExplainCommand(wrapped.getCommand(wrapped.getClientSideOperationTimeout(), NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), + verbosity), resultDecoder); } - MongoNamespace getNamespace() { return 
wrapped.getNamespace(); } - Decoder getDecoder() { - return wrapped.getDecoder(); - } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java index 4379845bdd1..8add07ace70 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java @@ -19,6 +19,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -38,7 +39,6 @@ import java.util.Arrays; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.isTrueArgument; @@ -59,6 +59,7 @@ class AggregateOperationImpl implements AsyncReadOperation FIELD_NAMES_WITH_RESULT = Arrays.asList(RESULT, FIRST_BATCH); + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final List pipeline; private final Decoder decoder; @@ -71,18 +72,20 @@ class AggregateOperationImpl implements AsyncReadOperation pipeline, final Decoder decoder, - final AggregationLevel aggregationLevel) { - this(namespace, pipeline, decoder, defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel), - notNull("namespace", namespace).getCollectionName()), defaultPipelineCreator(pipeline)); + AggregateOperationImpl(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, final Decoder decoder, final AggregationLevel aggregationLevel) { + this(clientSideOperationTimeout, namespace, 
pipeline, decoder, + defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel), + notNull("namespace", namespace).getCollectionName()), + defaultPipelineCreator(pipeline)); } - AggregateOperationImpl(final MongoNamespace namespace, final List pipeline, final Decoder decoder, - final AggregateTarget aggregateTarget, final PipelineCreator pipelineCreator) { + AggregateOperationImpl(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, final Decoder decoder, final AggregateTarget aggregateTarget, + final PipelineCreator pipelineCreator) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.pipeline = notNull("pipeline", pipeline); this.decoder = notNull("decoder", decoder); @@ -120,30 +123,6 @@ AggregateOperationImpl batchSize(@Nullable final Integer batchSize) { return this; } - long getMaxAwaitTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS); - } - - AggregateOperationImpl maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxAwaitTime >= 0", maxAwaitTime >= 0); - this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); - return this; - } - - long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - AggregateOperationImpl maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - Collation getCollation() { return collation; } @@ -182,6 +161,10 @@ BsonValue getHint() { return hint; } + public ClientSideOperationTimeout getClientSideOperationTimeout() { + return 
clientSideOperationTimeout; + } + AggregateOperationImpl hint(@Nullable final BsonValue hint) { isTrueArgument("BsonString or BsonDocument", hint == null || hint.isDocument() || hint.isString()); this.hint = hint; @@ -190,30 +173,34 @@ AggregateOperationImpl hint(@Nullable final BsonValue hint) { @Override public BatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), transformer(), retryReads); + return executeRetryableRead(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), + transformer(), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(this.decoder, FIELD_NAMES_WITH_RESULT), asyncTransformer(), retryReads, + executeRetryableReadAsync(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(this.decoder, FIELD_NAMES_WITH_RESULT), + asyncTransformer(), retryReads, errHandlingCallback); } private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion()); + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> + getCommand(clientSideOperationTimeout, sessionContext, connectionDescription.getMaxWireVersion()); } - BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) { 
+ BsonDocument getCommand(final ClientSideOperationTimeout clientSideOperationTimeout, final SessionContext sessionContext, + final int maxWireVersion) { BsonDocument commandDocument = new BsonDocument("aggregate", aggregateTarget.create()); appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument); commandDocument.put("pipeline", pipelineCreator.create()); + long maxTimeMS = clientSideOperationTimeout.getMaxTimeMS(); if (maxTimeMS > 0) { - commandDocument.put("maxTimeMS", maxTimeMS > Integer.MAX_VALUE - ? new BsonInt64(maxTimeMS) : new BsonInt32((int) maxTimeMS)); + commandDocument.put("maxTimeMS", new BsonInt64(maxTimeMS)); } BsonDocument cursor = new BsonDocument(); if (batchSize != null) { @@ -247,6 +234,7 @@ private QueryResult createQueryResult(final BsonDocument result, final Connec private CommandReadTransformer> transformer() { return (result, source, connection) -> { QueryResult queryResult = createQueryResult(result, connection.getDescription()); + long maxAwaitTimeMS = clientSideOperationTimeout.getMaxAwaitTimeMS(); return new QueryBatchCursor<>(queryResult, 0, batchSize != null ? batchSize : 0, maxAwaitTimeMS, decoder, comment, source, connection, result); }; @@ -255,6 +243,7 @@ private CommandReadTransformer> transformer() private CommandReadTransformerAsync> asyncTransformer() { return (result, source, connection) -> { QueryResult queryResult = createQueryResult(result, connection.getDescription()); + long maxAwaitTimeMS = clientSideOperationTimeout.getMaxAwaitTimeMS(); return new AsyncQueryBatchCursor<>(queryResult, 0, batchSize != null ? 
batchSize : 0, maxAwaitTimeMS, decoder, comment, source, connection, result); }; diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java index f41d0e4a462..aef625125fa 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java @@ -21,6 +21,7 @@ import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -36,7 +37,6 @@ import org.bson.codecs.BsonDocumentCodec; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; @@ -56,6 +56,7 @@ *
<p>
This class is not part of the public API and may be removed or changed at any time
</p>
*/ public class AggregateToCollectionOperation implements AsyncReadOperation, ReadOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final List pipeline; private final WriteConcern writeConcern; @@ -63,35 +64,21 @@ public class AggregateToCollectionOperation implements AsyncReadOperation, private final AggregationLevel aggregationLevel; private Boolean allowDiskUse; - private long maxTimeMS; private Boolean bypassDocumentValidation; private Collation collation; private BsonValue comment; private BsonValue hint; private BsonDocument variables; - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline) { - this(namespace, pipeline, null, null, AggregationLevel.COLLECTION); + public AggregateToCollectionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, final ReadConcern readConcern, final WriteConcern writeConcern) { + this(clientSideOperationTimeout, namespace, pipeline, readConcern, writeConcern, AggregationLevel.COLLECTION); } - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - final WriteConcern writeConcern) { - this(namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION); - } - - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - final ReadConcern readConcern) { - this(namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION); - } - - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - final ReadConcern readConcern, final WriteConcern writeConcern) { - this(namespace, pipeline, readConcern, writeConcern, AggregationLevel.COLLECTION); - } - - public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, - @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, - final 
AggregationLevel aggregationLevel) { + public AggregateToCollectionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, + final AggregationLevel aggregationLevel) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.pipeline = notNull("pipeline", pipeline); this.writeConcern = writeConcern; @@ -122,17 +109,6 @@ public AggregateToCollectionOperation allowDiskUse(@Nullable final Boolean allow return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public AggregateToCollectionOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public Boolean getBypassDocumentValidation() { return bypassDocumentValidation; } @@ -176,10 +152,10 @@ public AggregateToCollectionOperation hint(@Nullable final BsonValue hint) { @Override public Void execute(final ReadBinding binding) { - return executeRetryableRead(binding, + return executeRetryableRead(clientSideOperationTimeout, binding, () -> binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()), namespace.getDatabaseName(), - (serverDescription, connectionDescription) -> getCommand(), + (clientSideOperationTimeout, serverDescription, connectionDescription) -> getCommand(), new BsonDocumentCodec(), (result, source, connection) -> { throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), connection.getDescription().getMaxWireVersion()); @@ -189,12 +165,11 @@ public Void execute(final ReadBinding binding) { @Override public void executeAsync(final AsyncReadBinding binding, final 
SingleResultCallback callback) { - executeRetryableReadAsync(binding, - (connectionSourceCallback) -> { - binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary(), connectionSourceCallback); - }, + executeRetryableReadAsync(clientSideOperationTimeout, binding, + (connectionSourceCallback) -> + binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary(), connectionSourceCallback), namespace.getDatabaseName(), - (serverDescription, connectionDescription) -> getCommand(), + (clientSideOperationTimeout, serverDescription, connectionDescription) -> getCommand(), new BsonDocumentCodec(), (result, source, connection) -> { throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), connection.getDescription().getMaxWireVersion()); @@ -208,6 +183,7 @@ private BsonDocument getCommand() { BsonDocument commandDocument = new BsonDocument("aggregate", aggregationTarget); commandDocument.put("pipeline", new BsonArray(pipeline)); + long maxTimeMS = clientSideOperationTimeout.getMaxTimeMS(); if (maxTimeMS > 0) { commandDocument.put("maxTimeMS", new BsonInt64(maxTimeMS)); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java index 21b10cdff08..ff706753885 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java @@ -22,6 +22,7 @@ import com.mongodb.ReadPreference; import com.mongodb.ServerAddress; import com.mongodb.assertions.Assertions; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackBiFunction; @@ -153,6 +154,7 @@ static void withAsyncConnectionSource(final AsyncConnectionSource source, final } static void 
executeRetryableReadAsync( + final ClientSideOperationTimeout clientSideOperationTimeout, final AsyncReadBinding binding, final String database, final CommandCreator commandCreator, @@ -160,11 +162,12 @@ static void executeRetryableReadAsync( final CommandReadTransformerAsync transformer, final boolean retryReads, final SingleResultCallback callback) { - executeRetryableReadAsync(binding, binding::getReadConnectionSource, database, commandCreator, decoder, transformer, retryReads, - callback); + executeRetryableReadAsync(clientSideOperationTimeout, binding, binding::getReadConnectionSource, database, commandCreator, + decoder, transformer, retryReads, callback); } static void executeRetryableReadAsync( + final ClientSideOperationTimeout clientSideOperationTimeout, final AsyncReadBinding binding, final AsyncCallbackSupplier sourceAsyncSupplier, final String database, @@ -185,11 +188,8 @@ static void executeRetryableReadAsync( releasingCallback)) { return; } - createReadCommandAndExecuteAsync(retryState, binding, source, - database, commandCreator, - decoder, transformer, - connection, - releasingCallback); + createReadCommandAndExecuteAsync(clientSideOperationTimeout, retryState, binding, source, database, + commandCreator, decoder, transformer, connection, releasingCallback); }) ).whenComplete(binding::release); asyncRead.get(errorHandlingCallback(callback, OperationHelper.LOGGER)); @@ -209,6 +209,7 @@ static void executeCommandAsync(final AsyncWriteBinding binding, } static void executeRetryableWriteAsync( + final ClientSideOperationTimeout clientSideOperationTimeout, final AsyncWriteBinding binding, final String database, @Nullable final ReadPreference readPreference, @@ -243,7 +244,7 @@ static void executeRetryableWriteAsync( .map(previousAttemptCommand -> { Assertions.assertFalse(firstAttempt); return retryCommandModifier.apply(previousAttemptCommand); - }).orElseGet(() -> commandCreator.create(source.getServerDescription(), connection.getDescription())); + 
}).orElseGet(() -> commandCreator.create(clientSideOperationTimeout, source.getServerDescription(), connection.getDescription())); // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true) .attach(AttachmentKeys.retryableCommandFlag(), isRetryWritesEnabled(command), true) @@ -262,6 +263,7 @@ static void executeRetryableWriteAsync( } static void createReadCommandAndExecuteAsync( + final ClientSideOperationTimeout clientSideOperationTimeout, final RetryState retryState, final AsyncReadBinding binding, final AsyncConnectionSource source, @@ -273,7 +275,7 @@ static void createReadCommandAndExecuteAsync( final SingleResultCallback callback) { BsonDocument command; try { - command = commandCreator.create(source.getServerDescription(), connection.getDescription()); + command = commandCreator.create(clientSideOperationTimeout, source.getServerDescription(), connection.getDescription()); retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false); } catch (IllegalArgumentException e) { callback.onResult(null, e); diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java index 81b5fb513f2..38193bf106e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java @@ -69,9 +69,9 @@ public final class AsyncOperations { public AsyncOperations(final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, - final boolean retryWrites, final boolean retryReads) { + final boolean retryWrites, final boolean retryReads, @Nullable final Long timeoutMS) { this.operations = new Operations<>(namespace, 
documentClass, readPreference, codecRegistry, readConcern, writeConcern, - retryWrites, retryReads); + retryWrites, retryReads, timeoutMS); } public MongoNamespace getNamespace() { @@ -106,6 +106,11 @@ public boolean isRetryReads() { return operations.isRetryReads(); } + @Nullable + public Long getTimeoutMS() { + return operations.getTimeoutMS(); + } + public AsyncReadOperation countDocuments(final Bson filter, final CountOptions options) { return operations.countDocuments(filter, options); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncQueryBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncQueryBatchCursor.java index b7f721b5fc4..d97b80da764 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncQueryBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncQueryBatchCursor.java @@ -55,9 +55,9 @@ import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.CursorHelper.getNumberToReturn; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; -import static com.mongodb.internal.operation.SyncOperationHelper.getMoreCursorDocumentToQueryResult; import static com.mongodb.internal.operation.QueryHelper.translateCommandException; import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; +import static com.mongodb.internal.operation.SyncOperationHelper.getMoreCursorDocumentToQueryResult; import static java.lang.String.format; import static java.util.Collections.singletonList; diff --git a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java index e3ae79fa589..6603799276f 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java +++ 
b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java @@ -20,6 +20,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -33,8 +34,6 @@ import org.bson.codecs.Decoder; import org.bson.conversions.Bson; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableWriteAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; @@ -43,7 +42,6 @@ import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; import static com.mongodb.internal.operation.OperationHelper.validateHintForFindAndModify; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableWrite; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** * Abstract base class for findAndModify-based operations @@ -51,7 +49,7 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public abstract class BaseFindAndModifyOperation implements AsyncWriteOperation, WriteOperation { - + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final WriteConcern writeConcern; private final boolean retryWrites; @@ -60,15 +58,15 @@ public abstract class BaseFindAndModifyOperation implements AsyncWriteOperati private BsonDocument filter; private BsonDocument projection; private BsonDocument sort; - private long maxTimeMS; private Collation collation; private Bson hint; private String hintString; private BsonValue comment; private BsonDocument variables; - protected BaseFindAndModifyOperation(final MongoNamespace namespace, final WriteConcern writeConcern, - final boolean retryWrites, final Decoder decoder) { + protected BaseFindAndModifyOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.writeConcern = notNull("writeConcern", writeConcern); this.retryWrites = retryWrites; @@ -77,7 +75,7 @@ protected BaseFindAndModifyOperation(final MongoNamespace namespace, final Write @Override public T execute(final WriteBinding binding) { - return executeRetryableWrite(binding, getDatabaseName(), null, getFieldNameValidator(), + return executeRetryableWrite(clientSideOperationTimeout, binding, getDatabaseName(), null, getFieldNameValidator(), CommandResultDocumentCodec.create(getDecoder(), "value"), getCommandCreator(binding.getSessionContext()), FindAndModifyHelper.transformer(), @@ -86,7 +84,7 @@ public T execute(final WriteBinding binding) { @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { - executeRetryableWriteAsync(binding, getDatabaseName(), null, 
getFieldNameValidator(), + executeRetryableWriteAsync(clientSideOperationTimeout, binding, getDatabaseName(), null, getFieldNameValidator(), CommandResultDocumentCodec.create(getDecoder(), "value"), getCommandCreator(binding.getSessionContext()), FindAndModifyHelper.asyncTransformer(), cmd -> cmd, callback); } @@ -125,17 +123,6 @@ public BaseFindAndModifyOperation projection(@Nullable final BsonDocument pro return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, MILLISECONDS); - } - - public BaseFindAndModifyOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public BsonDocument getSort() { return sort; } @@ -198,7 +185,7 @@ public BaseFindAndModifyOperation let(@Nullable final BsonDocument variables) protected abstract void specializeCommand(BsonDocument initialCommand, ConnectionDescription connectionDescription); private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> { + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { BsonDocument commandDocument = new BsonDocument("findAndModify", new BsonString(getNamespace().getCollectionName())); putIfNotNull(commandDocument, "query", getFilter()); putIfNotNull(commandDocument, "fields", getProjection()); @@ -206,7 +193,7 @@ private CommandCreator getCommandCreator(final SessionContext sessionContext) { specializeCommand(commandDocument, connectionDescription); - putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS)); + putIfNotZero(commandDocument, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); if (getWriteConcern().isAcknowledged() && !getWriteConcern().isServerDefault() && !sessionContext.hasActiveTransaction()) { commandDocument.put("writeConcern", getWriteConcern().asDocument()); 
} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java index a2ba029eb56..14a1ae974ac 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java @@ -20,6 +20,7 @@ import com.mongodb.client.model.Collation; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -40,7 +41,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncReadConnectionSource; @@ -65,15 +65,16 @@ public class ChangeStreamOperation implements AsyncReadOperation pipeline, final Decoder decoder) { - this(namespace, fullDocument, fullDocumentBeforeChange, pipeline, decoder, ChangeStreamLevel.COLLECTION); + public ChangeStreamOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, + final List pipeline, final Decoder decoder) { + this(clientSideOperationTimeout, namespace, fullDocument, fullDocumentBeforeChange, pipeline, decoder, ChangeStreamLevel.COLLECTION); } - public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument fullDocument, - final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, + public ChangeStreamOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, 
+ final FullDocument fullDocument, final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, final ChangeStreamLevel changeStreamLevel) { - this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, + this.wrapped = new AggregateOperationImpl<>(clientSideOperationTimeout, namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, getAggregateTarget(), getPipelineCreator()); this.fullDocument = notNull("fullDocument", fullDocument); this.fullDocumentBeforeChange = notNull("fullDocumentBeforeChange", fullDocumentBeforeChange); @@ -124,15 +125,6 @@ public ChangeStreamOperation batchSize(@Nullable final Integer batchSize) { return this; } - public long getMaxAwaitTime(final TimeUnit timeUnit) { - return wrapped.getMaxAwaitTime(timeUnit); - } - - public ChangeStreamOperation maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - wrapped.maxAwaitTime(maxAwaitTime, timeUnit); - return this; - } - public Collation getCollation() { return wrapped.getCollation(); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java index fb1cc3c2da2..0414db9221c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java @@ -28,6 +28,7 @@ import com.mongodb.assertions.Assertions; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.function.RetryState; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException; @@ -48,7 +49,8 @@ final class CommandOperationHelper { interface CommandCreator { - BsonDocument create(ServerDescription serverDescription, 
ConnectionDescription connectionDescription); + BsonDocument create(ClientSideOperationTimeout clientSideOperationTimeout, ServerDescription serverDescription, + ConnectionDescription connectionDescription); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java index 47b807f91ec..eb39c2d1504 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java @@ -16,6 +16,7 @@ package com.mongodb.internal.operation; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -33,11 +34,14 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class CommandReadOperation implements AsyncReadOperation, ReadOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final String databaseName; private final BsonDocument command; private final Decoder decoder; - public CommandReadOperation(final String databaseName, final BsonDocument command, final Decoder decoder) { + public CommandReadOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final String databaseName, + final BsonDocument command, final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.databaseName = notNull("databaseName", databaseName); this.command = notNull("command", command); this.decoder = notNull("decoder", decoder); @@ -45,16 +49,18 @@ public CommandReadOperation(final String databaseName, final BsonDocument comman @Override public T execute(final ReadBinding binding) { - return executeRetryableRead(binding, databaseName, getCommandCreator(), decoder, (result, source, connection) -> result, false); + return executeRetryableRead(clientSideOperationTimeout, binding, databaseName, getCommandCreator(), decoder, + (result, source, connection) -> result, false); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { - executeRetryableReadAsync(binding, databaseName, getCommandCreator(), decoder, (result, source, connection) -> result, - false, callback); + executeRetryableReadAsync(clientSideOperationTimeout, binding, databaseName, getCommandCreator(), decoder, + (result, source, connection) -> result, false, callback); } + // TODO (CSOT) - JAVA-5098 - should the command be modified for CSOT? 
private CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> command; + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> command; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java index 92779bc61ae..5127160e3f5 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java @@ -25,22 +25,19 @@ import com.mongodb.MongoTimeoutException; import com.mongodb.MongoWriteConcernException; import com.mongodb.WriteConcern; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; -import org.bson.BsonInt32; -import org.bson.BsonInt64; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; -import static com.mongodb.assertions.Assertions.isTrueArgument; -import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.CommandOperationHelper.RETRYABLE_WRITE_ERROR_LABEL; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -52,14 +49,14 @@ public class CommitTransactionOperation extends TransactionOperation { private final boolean alreadyCommitted; private BsonDocument recoveryToken; - private Long maxCommitTimeMS; - public CommitTransactionOperation(final WriteConcern writeConcern) { - this(writeConcern, 
false); + public CommitTransactionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final WriteConcern writeConcern) { + this(clientSideOperationTimeout, writeConcern, false); } - public CommitTransactionOperation(final WriteConcern writeConcern, final boolean alreadyCommitted) { - super(writeConcern); + public CommitTransactionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final WriteConcern writeConcern, + final boolean alreadyCommitted) { + super(clientSideOperationTimeout, writeConcern); this.alreadyCommitted = alreadyCommitted; } @@ -68,26 +65,6 @@ public CommitTransactionOperation recoveryToken(@Nullable final BsonDocument rec return this; } - public CommitTransactionOperation maxCommitTime(@Nullable final Long maxCommitTime, final TimeUnit timeUnit) { - if (maxCommitTime == null) { - this.maxCommitTimeMS = null; - } else { - notNull("timeUnit", timeUnit); - isTrueArgument("maxCommitTime > 0", maxCommitTime > 0); - this.maxCommitTimeMS = MILLISECONDS.convert(maxCommitTime, timeUnit); - } - return this; - } - - @Nullable - public Long getMaxCommitTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - if (maxCommitTimeMS == null) { - return null; - } - return timeUnit.convert(maxCommitTimeMS, MILLISECONDS); - } - @Override public Void execute(final WriteBinding binding) { try { @@ -143,20 +120,20 @@ protected String getCommandName() { @Override CommandCreator getCommandCreator() { - CommandCreator creator = (serverDescription, connectionDescription) -> { - BsonDocument command = CommitTransactionOperation.super.getCommandCreator().create(serverDescription, - connectionDescription); - if (maxCommitTimeMS != null) { - command.append("maxTimeMS", - maxCommitTimeMS > Integer.MAX_VALUE - ? 
new BsonInt64(maxCommitTimeMS) : new BsonInt32(maxCommitTimeMS.intValue())); - } + CommandCreator creator = (clientSideOperationTimeout, serverDescription, connectionDescription) -> { + BsonDocument command = CommitTransactionOperation.super.getCommandCreator() + .create(clientSideOperationTimeout, serverDescription, connectionDescription); + long maxCommitTimeMS = clientSideOperationTimeout.getMaxCommitTimeMS(); + putIfNotZero(command, "maxTimeMS", maxCommitTimeMS); return command; }; if (alreadyCommitted) { - return (serverDescription, connectionDescription) -> getRetryCommandModifier().apply(creator.create(serverDescription, connectionDescription)); + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> + getRetryCommandModifier().apply(creator.create(clientSideOperationTimeout, serverDescription, connectionDescription)); } else if (recoveryToken != null) { - return (serverDescription, connectionDescription) -> creator.create(serverDescription, connectionDescription).append("recoveryToken", recoveryToken); + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> + creator.create(clientSideOperationTimeout, serverDescription, connectionDescription) + .append("recoveryToken", recoveryToken); } return creator; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java index 5cdb974b7c0..453597723cb 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java @@ -18,6 +18,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -31,7 
+32,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; @@ -40,6 +40,7 @@ */ public class CountDocumentsOperation implements AsyncReadOperation, ReadOperation { private static final Decoder DECODER = new BsonDocumentCodec(); + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private boolean retryReads; private BsonDocument filter; @@ -47,13 +48,14 @@ public class CountDocumentsOperation implements AsyncReadOperation, ReadOp private BsonValue comment; private long skip; private long limit; - private long maxTimeMS; private Collation collation; - public CountDocumentsOperation(final MongoNamespace namespace) { + public CountDocumentsOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); } + @Nullable public BsonDocument getFilter() { return filter; } @@ -72,6 +74,7 @@ public boolean getRetryReads() { return retryReads; } + @Nullable public BsonValue getHint() { return hint; } @@ -99,17 +102,7 @@ public CountDocumentsOperation skip(final long skip) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public CountDocumentsOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - + @Nullable public Collation getCollation() { return collation; } @@ -153,12 +146,11 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb } private AggregateOperation getAggregateOperation() { - return new AggregateOperation<>(namespace, getPipeline(), DECODER) + 
return new AggregateOperation<>(clientSideOperationTimeout, namespace, getPipeline(), DECODER) .retryReads(retryReads) .collation(collation) .comment(comment) - .hint(hint) - .maxTime(maxTimeMS, TimeUnit.MILLISECONDS); + .hint(hint); } private List getPipeline() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java index 43298bae4bf..fde5991360c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java @@ -18,7 +18,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; -import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -30,8 +30,6 @@ import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; @@ -47,16 +45,17 @@ */ public class CountOperation implements AsyncReadOperation, ReadOperation { private static final Decoder DECODER = new BsonDocumentCodec(); + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private boolean retryReads; private BsonDocument filter; private BsonValue hint; private long skip; private long limit; - private long maxTimeMS; private Collation collation; - public CountOperation(final MongoNamespace namespace) { + public CountOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace) { + 
this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); } @@ -105,17 +104,6 @@ public CountOperation skip(final long skip) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public CountOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public Collation getCollation() { return collation; } @@ -127,14 +115,14 @@ public CountOperation collation(@Nullable final Collation collation) { @Override public Long execute(final ReadBinding binding) { - return executeRetryableRead(binding, namespace.getDatabaseName(), + return executeRetryableRead(clientSideOperationTimeout, binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), DECODER, transformer(), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), DECODER, - asyncTransformer(), retryReads, callback); + executeRetryableReadAsync(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), DECODER, asyncTransformer(), retryReads, callback); } private CommandReadTransformer transformer() { @@ -146,23 +134,21 @@ private CommandReadTransformerAsync asyncTransformer() { } private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription); - } - - private BsonDocument getCommand(final SessionContext sessionContext, final ConnectionDescription connectionDescription) { - BsonDocument document = new 
BsonDocument("count", new BsonString(namespace.getCollectionName())); - - appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), document); - - putIfNotNull(document, "query", filter); - putIfNotZero(document, "limit", limit); - putIfNotZero(document, "skip", skip); - putIfNotNull(document, "hint", hint); - putIfNotZero(document, "maxTimeMS", maxTimeMS); - - if (collation != null) { - document.put("collation", collation.asDocument()); - } - return document; + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { + BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName())); + + appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), document); + + putIfNotNull(document, "query", filter); + putIfNotZero(document, "limit", limit); + putIfNotZero(document, "skip", skip); + putIfNotNull(document, "hint", hint); + putIfNotZero(document, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); + + if (collation != null) { + document.put("collation", collation.asDocument()); + } + return document; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java index c78fee6838e..85ea7e52fca 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java @@ -26,6 +26,7 @@ import com.mongodb.client.model.ValidationAction; import com.mongodb.client.model.ValidationLevel; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -71,6 +72,7 @@ public class CreateCollectionOperation implements 
AsyncWriteOperation, Wri private static final BsonDocument ENCRYPT_CLUSTERED_INDEX = BsonDocument.parse("{key: {_id: 1}, unique: true}"); private static final BsonArray SAFE_CONTENT_ARRAY = new BsonArray( singletonList(BsonDocument.parse("{key: {__safeContent__: 1}, name: '__safeContent___1'}"))); + private final ClientSideOperationTimeout clientSideOperationTimeout; private final String databaseName; private final String collectionName; private final WriteConcern writeConcern; @@ -92,11 +94,9 @@ public class CreateCollectionOperation implements AsyncWriteOperation, Wri private String clusteredIndexName; private BsonDocument encryptedFields; - public CreateCollectionOperation(final String databaseName, final String collectionName) { - this(databaseName, collectionName, null); - } - - public CreateCollectionOperation(final String databaseName, final String collectionName, @Nullable final WriteConcern writeConcern) { + public CreateCollectionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final String databaseName, + final String collectionName, @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.databaseName = notNull("databaseName", databaseName); this.collectionName = notNull("collectionName", collectionName); this.writeConcern = writeConcern; diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java index b47b45a5eee..eca30e18dc3 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java @@ -26,6 +26,7 @@ import com.mongodb.WriteConcern; import com.mongodb.WriteConcernResult; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import 
com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -44,7 +45,6 @@ import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertNotNull; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; @@ -66,18 +66,15 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class CreateIndexesOperation implements AsyncWriteOperation, WriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final List requests; private final WriteConcern writeConcern; - private long maxTimeMS; private CreateIndexCommitQuorum commitQuorum; - public CreateIndexesOperation(final MongoNamespace namespace, final List requests) { - this(namespace, requests, null); - } - - public CreateIndexesOperation(final MongoNamespace namespace, final List requests, - @Nullable final WriteConcern writeConcern) { + public CreateIndexesOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List requests, @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.requests = notNull("indexRequests", requests); this.writeConcern = writeConcern; @@ -103,18 +100,6 @@ public List getIndexNames() { return indexNames; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public CreateIndexesOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public CreateIndexCommitQuorum getCommitQuorum() { return commitQuorum; } @@ -231,7 +216,7 @@ private BsonDocument getCommand(final ConnectionDescription description) { values.add(getIndex(request)); } command.put("indexes", new BsonArray(values)); - putIfNotZero(command, "maxTimeMS", maxTimeMS); + putIfNotZero(command, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); appendWriteConcernToCommand(writeConcern, command); if (commitQuorum != null) { if 
(serverIsAtLeastVersionFourDotFour(description)) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java index 8d1e98de6b8..cbb1b360095 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java @@ -18,6 +18,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -47,6 +48,7 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class CreateViewOperation implements AsyncWriteOperation, WriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final String databaseName; private final String viewName; private final String viewOn; @@ -54,8 +56,9 @@ public class CreateViewOperation implements AsyncWriteOperation, WriteOper private final WriteConcern writeConcern; private Collation collation; - public CreateViewOperation(final String databaseName, final String viewName, final String viewOn, final List pipeline, - final WriteConcern writeConcern) { + public CreateViewOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final String databaseName, + final String viewName, final String viewOn, final List pipeline, final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.databaseName = notNull("databaseName", databaseName); this.viewName = notNull("viewName", viewName); this.viewOn = notNull("viewOn", viewOn); diff --git a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java index a64c4cbfadd..50850f2789e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java @@ -19,6 +19,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -32,8 +33,6 @@ import org.bson.codecs.Codec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static 
com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -54,16 +53,18 @@ public class DistinctOperation implements AsyncReadOperation>, ReadOperation> { private static final String VALUES = "values"; + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final String fieldName; private final Decoder decoder; private boolean retryReads; private BsonDocument filter; - private long maxTimeMS; private Collation collation; private BsonValue comment; - public DistinctOperation(final MongoNamespace namespace, final String fieldName, final Decoder decoder) { + public DistinctOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final String fieldName, final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.fieldName = notNull("fieldName", fieldName); this.decoder = notNull("decoder", decoder); @@ -87,17 +88,6 @@ public boolean getRetryReads() { return retryReads; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public DistinctOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public Collation getCollation() { return collation; } @@ -119,14 +109,15 @@ public DistinctOperation comment(final BsonValue comment) { @Override public BatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - createCommandDecoder(), transformer(), retryReads); + return 
executeRetryableRead(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), createCommandDecoder(), transformer(), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - createCommandDecoder(), asyncTransformer(), retryReads, errorHandlingCallback(callback, LOGGER)); + executeRetryableReadAsync(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), createCommandDecoder(), asyncTransformer(), retryReads, + errorHandlingCallback(callback, LOGGER)); } private Codec createCommandDecoder() { @@ -153,19 +144,17 @@ private CommandReadTransformerAsync> asyncTran } private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription); - } - - private BsonDocument getCommand(final SessionContext sessionContext, final ConnectionDescription connectionDescription) { - BsonDocument commandDocument = new BsonDocument("distinct", new BsonString(namespace.getCollectionName())); - appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), commandDocument); - commandDocument.put("key", new BsonString(fieldName)); - putIfNotNull(commandDocument, "query", filter); - putIfNotZero(commandDocument, "maxTimeMS", maxTimeMS); - if (collation != null) { - commandDocument.put("collation", collation.asDocument()); - } - putIfNotNull(commandDocument, "comment", comment); - return commandDocument; + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument("distinct", new BsonString(namespace.getCollectionName())); + appendReadConcernToCommand(sessionContext, 
connectionDescription.getMaxWireVersion(), commandDocument); + commandDocument.put("key", new BsonString(fieldName)); + putIfNotNull(commandDocument, "query", filter); + putIfNotZero(commandDocument, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java index d0e73948339..46a66fcf28e 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java @@ -59,6 +59,12 @@ static void putIfNotNull(final BsonDocument command, final String key, @Nullable } } + static void putIfNotNull(final BsonDocument command, final String key, @Nullable final Boolean value) { + if (value != null) { + command.put(key, new BsonBoolean(value)); + } + } + static void putIfNotZero(final BsonDocument command, final String key, final int value) { if (value != 0) { command.put(key, new BsonInt32(value)); diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java index 6ddc087bdee..82490ae875d 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java @@ -19,6 +19,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadWriteBinding; import com.mongodb.internal.binding.AsyncWriteBinding; @@ -61,16 +62,15 @@ public 
class DropCollectionOperation implements AsyncWriteOperation, WriteOperation { private static final String ENCRYPT_PREFIX = "enxcol_."; private static final BsonValueCodec BSON_VALUE_CODEC = new BsonValueCodec(); + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final WriteConcern writeConcern; private BsonDocument encryptedFields; private boolean autoEncryptedFields; - public DropCollectionOperation(final MongoNamespace namespace) { - this(namespace, null); - } - - public DropCollectionOperation(final MongoNamespace namespace, @Nullable final WriteConcern writeConcern) { + public DropCollectionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.writeConcern = writeConcern; } @@ -217,7 +217,7 @@ private BsonDocument getCollectionEncryptedFields(final BsonDocument defaultEncr } private ListCollectionsOperation listCollectionOperation() { - return new ListCollectionsOperation<>(namespace.getDatabaseName(), BSON_VALUE_CODEC) + return new ListCollectionsOperation<>(clientSideOperationTimeout, namespace.getDatabaseName(), BSON_VALUE_CODEC) .filter(new BsonDocument("name", new BsonString(namespace.getCollectionName()))) .batchSize(1); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java index 2dad7dda177..64112339c1c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java @@ -17,6 +17,7 @@ package com.mongodb.internal.operation; import com.mongodb.WriteConcern; +import 
com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -43,14 +44,13 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class DropDatabaseOperation implements AsyncWriteOperation, WriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final String databaseName; private final WriteConcern writeConcern; - public DropDatabaseOperation(final String databaseName) { - this(databaseName, null); - } - - public DropDatabaseOperation(final String databaseName, @Nullable final WriteConcern writeConcern) { + public DropDatabaseOperation(final ClientSideOperationTimeout clientSideOperationTimeout, + final String databaseName, @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.databaseName = notNull("databaseName", databaseName); this.writeConcern = writeConcern; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java index 66bb8f408fb..e3dbf619575 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java @@ -19,6 +19,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -26,9 +27,6 @@ import org.bson.BsonDocument; import org.bson.BsonString; -import java.util.concurrent.TimeUnit; - -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; @@ -50,28 +48,24 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class DropIndexOperation implements AsyncWriteOperation, WriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final String indexName; private final BsonDocument indexKeys; private final WriteConcern writeConcern; - private long maxTimeMS; - - public DropIndexOperation(final MongoNamespace namespace, final String indexName) { - this(namespace, indexName, null); - } - - public DropIndexOperation(final MongoNamespace namespace, final BsonDocument keys) { - this(namespace, keys, null); - } - public DropIndexOperation(final MongoNamespace namespace, final String indexName, @Nullable final WriteConcern writeConcern) { + public DropIndexOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final String indexName, @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.indexName = notNull("indexName", indexName); this.indexKeys = null; this.writeConcern = writeConcern; } - public DropIndexOperation(final MongoNamespace namespace, final BsonDocument indexKeys, @Nullable final WriteConcern writeConcern) { + public DropIndexOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final BsonDocument indexKeys, @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.indexKeys = notNull("indexKeys", indexKeys); this.indexName = null; @@ -82,18 +76,6 @@ public WriteConcern getWriteConcern() { return writeConcern; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public DropIndexOperation 
maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - @Override public Void execute(final WriteBinding binding) { return withConnection(binding, connection -> { @@ -135,7 +117,7 @@ private BsonDocument getCommand() { command.put("index", indexKeys); } - putIfNotZero(command, "maxTimeMS", maxTimeMS); + putIfNotZero(command, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); appendWriteConcernToCommand(writeConcern, command); return command; } diff --git a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java index 571de884582..824a045130a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java @@ -19,6 +19,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -30,8 +31,6 @@ import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -50,12 +49,14 @@ */ public class EstimatedDocumentCountOperation implements AsyncReadOperation, ReadOperation { private static final Decoder DECODER = new BsonDocumentCodec(); + private final ClientSideOperationTimeout 
clientSideOperationTimeout; private final MongoNamespace namespace; private boolean retryReads; - private long maxTimeMS; private BsonValue comment; - public EstimatedDocumentCountOperation(final MongoNamespace namespace) { + public EstimatedDocumentCountOperation(final ClientSideOperationTimeout clientSideOperationTimeout, + final MongoNamespace namespace) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); } @@ -64,12 +65,6 @@ public EstimatedDocumentCountOperation retryReads(final boolean retryReads) { return this; } - public EstimatedDocumentCountOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - @Nullable public BsonValue getComment() { return comment; @@ -83,8 +78,9 @@ public EstimatedDocumentCountOperation comment(@Nullable final BsonValue comment @Override public Long execute(final ReadBinding binding) { try { - return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), transformer(), retryReads); + return executeRetryableRead(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), + transformer(), retryReads); } catch (MongoCommandException e) { return assertNotNull(rethrowIfNotNamespaceError(e, 0L)); } @@ -92,8 +88,9 @@ public Long execute(final ReadBinding binding) { @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), 
asyncTransformer(), retryReads, + executeRetryableReadAsync(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), + asyncTransformer(), retryReads, (result, t) -> { if (isNamespaceError(t)) { callback.onResult(0L, null); @@ -116,10 +113,10 @@ private long transformResult(final BsonDocument result, final ConnectionDescript } private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> { + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { BsonDocument document = new BsonDocument("count", new BsonString(namespace.getCollectionName())); appendReadConcernToCommand(sessionContext, connectionDescription.getMaxWireVersion(), document); - putIfNotZero(document, "maxTimeMS", maxTimeMS); + putIfNotZero(document, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); if (comment != null) { document.put("comment", comment); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java index ede7ee51628..f1dc75e1a1a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java @@ -20,6 +20,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.lang.Nullable; import org.bson.BsonBoolean; @@ -29,8 +30,6 @@ import org.bson.codecs.Decoder; import org.bson.conversions.Bson; -import java.util.concurrent.TimeUnit; - /** * An operation that atomically finds and deletes a single document. 
* @@ -38,9 +37,9 @@ */ public class FindAndDeleteOperation extends BaseFindAndModifyOperation { - public FindAndDeleteOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, - final Decoder decoder) { - super(namespace, writeConcern, retryWrites, decoder); + public FindAndDeleteOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder) { + super(clientSideOperationTimeout, namespace, writeConcern, retryWrites, decoder); } @Override @@ -55,12 +54,6 @@ public FindAndDeleteOperation projection(@Nullable final BsonDocument project return this; } - @Override - public FindAndDeleteOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - super.maxTime(maxTime, timeUnit); - return this; - } - @Override public FindAndDeleteOperation sort(@Nullable final BsonDocument sort) { super.sort(sort); diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java index de988c963a4..9cffda1565b 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java @@ -20,6 +20,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.validator.MappedFieldNameValidator; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator; @@ -33,7 +34,6 @@ import java.util.HashMap; import java.util.Map; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; import static 
com.mongodb.internal.operation.DocumentHelper.putIfTrue; @@ -49,9 +49,9 @@ public class FindAndReplaceOperation extends BaseFindAndModifyOperation { private boolean upsert; private Boolean bypassDocumentValidation; - public FindAndReplaceOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, - final Decoder decoder, final BsonDocument replacement) { - super(namespace, writeConcern, retryWrites, decoder); + public FindAndReplaceOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final BsonDocument replacement) { + super(clientSideOperationTimeout, namespace, writeConcern, retryWrites, decoder); this.replacement = notNull("replacement", replacement); } @@ -98,12 +98,6 @@ public FindAndReplaceOperation projection(@Nullable final BsonDocument projec return this; } - @Override - public FindAndReplaceOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - super.maxTime(maxTime, timeUnit); - return this; - } - @Override public FindAndReplaceOperation sort(@Nullable final BsonDocument sort) { super.sort(sort); diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java index 17ce879102d..2b21c61a703 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java @@ -20,6 +20,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.validator.MappedFieldNameValidator; import com.mongodb.internal.validator.NoOpFieldNameValidator; import com.mongodb.internal.validator.UpdateFieldNameValidator; @@ -35,7 
+36,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; @@ -54,16 +54,16 @@ public class FindAndUpdateOperation extends BaseFindAndModifyOperation { private Boolean bypassDocumentValidation; private List arrayFilters; - public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, - final Decoder decoder, final BsonDocument update) { - super(namespace, writeConcern, retryWrites, decoder); + public FindAndUpdateOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final BsonDocument update) { + super(clientSideOperationTimeout, namespace, writeConcern, retryWrites, decoder); this.update = notNull("update", update); this.updatePipeline = null; } - public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, - final Decoder decoder, final List update) { - super(namespace, writeConcern, retryWrites, decoder); + public FindAndUpdateOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final List update) { + super(clientSideOperationTimeout, namespace, writeConcern, retryWrites, decoder); this.updatePipeline = update; this.update = null; } @@ -126,12 +126,6 @@ public FindAndUpdateOperation projection(@Nullable final BsonDocument project return this; } - @Override - public FindAndUpdateOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - super.maxTime(maxTime, timeUnit); - return this; - } - @Override public FindAndUpdateOperation sort(@Nullable final BsonDocument sort) { 
super.sort(sort); diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java index dcb94211fcf..e8fe9645fb2 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java @@ -22,6 +22,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.MongoQueryException; import com.mongodb.client.model.Collation; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; @@ -40,10 +41,8 @@ import org.bson.BsonValue; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -73,6 +72,7 @@ public class FindOperation implements AsyncExplainableReadOperation>, ExplainableReadOperation> { private static final String FIRST_BATCH = "firstBatch"; + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final Decoder decoder; private boolean retryReads; @@ -80,8 +80,6 @@ public class FindOperation implements AsyncExplainableReadOperation implements AsyncExplainableReadOperation decoder) { + public FindOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.decoder = 
notNull("decoder", decoder); } @@ -147,30 +147,6 @@ public FindOperation projection(@Nullable final BsonDocument projection) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public FindOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxTime >= 0", maxTime >= 0); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - - public long getMaxAwaitTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS); - } - - public FindOperation maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - isTrueArgument("maxAwaitTime >= 0", maxAwaitTime >= 0); - this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); - return this; - } - public int getSkip() { return skip; } @@ -322,7 +298,7 @@ public BatchCursor execute(final ReadBinding binding) { withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); try { - return createReadCommandAndExecute(retryState, binding, source, namespace.getDatabaseName(), + return createReadCommandAndExecute(clientSideOperationTimeout, retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, FIRST_BATCH), transformer(), connection); } catch (MongoCommandException e) { @@ -347,7 +323,7 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb return; } SingleResultCallback> wrappedCallback = exceptionTransformingCallback(releasingCallback); - createReadCommandAndExecuteAsync(retryState, binding, source, namespace.getDatabaseName(), + 
createReadCommandAndExecuteAsync(clientSideOperationTimeout, retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, FIRST_BATCH), asyncTransformer(), connection, wrappedCallback); }) @@ -374,20 +350,21 @@ private static SingleResultCallback exceptionTransformingCallback(final S @Override public ReadOperation asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), + return new CommandReadOperation<>(clientSideOperationTimeout, getNamespace().getDatabaseName(), + asExplainCommand(getCommand(clientSideOperationTimeout, NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), resultDecoder); } @Override public AsyncReadOperation asAsyncExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { - return new CommandReadOperation<>(getNamespace().getDatabaseName(), - asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), + return new CommandReadOperation<>(clientSideOperationTimeout, getNamespace().getDatabaseName(), + asExplainCommand(getCommand(clientSideOperationTimeout, NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), verbosity), resultDecoder); } - private BsonDocument getCommand(final SessionContext sessionContext, final int maxWireVersion) { + private BsonDocument getCommand(final ClientSideOperationTimeout clientSideOperationTimeout, final SessionContext sessionContext, + final int maxWireVersion) { BsonDocument commandDocument = new BsonDocument("find", new BsonString(namespace.getCollectionName())); appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument); @@ -411,6 +388,7 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m if (limit < 0 || batchSize 
< 0) { commandDocument.put("singleBatch", BsonBoolean.TRUE); } + long maxTimeMS = clientSideOperationTimeout.getMaxTimeMS(); if (maxTimeMS > 0) { commandDocument.put("maxTimeMS", new BsonInt64(maxTimeMS)); } @@ -460,7 +438,8 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m } private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion()); + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> + getCommand(clientSideOperationTimeout, sessionContext, connectionDescription.getMaxWireVersion()); } private boolean isTailableCursor() { @@ -481,7 +460,7 @@ private CommandReadTransformer> transformer() } private long getMaxTimeForCursor() { - return cursorType == CursorType.TailableAwait ? maxAwaitTimeMS : 0; + return cursorType == CursorType.TailableAwait ? clientSideOperationTimeout.getMaxAwaitTimeMS() : 0; } private CommandReadTransformerAsync> asyncTransformer() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java index fa2a5dcd995..c9bcf2a6f88 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java @@ -18,6 +18,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; @@ -26,15 +27,12 @@ import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.lang.Nullable; -import org.bson.BsonBoolean; import 
org.bson.BsonDocument; import org.bson.BsonInt32; -import org.bson.BsonInt64; import org.bson.BsonValue; import org.bson.codecs.Codec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.notNull; @@ -50,6 +48,8 @@ import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; import static com.mongodb.internal.operation.CursorHelper.getCursorDocumentFromBatchSize; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; +import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; import static com.mongodb.internal.operation.OperationHelper.createEmptyBatchCursor; @@ -67,16 +67,18 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class ListCollectionsOperation implements AsyncReadOperation>, ReadOperation> { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final String databaseName; private final Decoder decoder; private boolean retryReads; private BsonDocument filter; private int batchSize; - private long maxTimeMS; private boolean nameOnly; private BsonValue comment; - public ListCollectionsOperation(final String databaseName, final Decoder decoder) { + public ListCollectionsOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final String databaseName, + final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.databaseName = notNull("databaseName", databaseName); this.decoder = notNull("decoder", decoder); } @@ -108,17 +110,6 @@ public ListCollectionsOperation batchSize(final int batchSize) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public ListCollectionsOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public ListCollectionsOperation retryReads(final boolean retryReads) { this.retryReads = retryReads; return this; @@ -145,8 +136,8 @@ public BatchCursor execute(final ReadBinding binding) { withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); try { - return createReadCommandAndExecute(retryState, binding, source, databaseName, getCommandCreator(), - createCommandDecoder(), commandTransformer(), connection); + return createReadCommandAndExecute(clientSideOperationTimeout, retryState, binding, source, databaseName, + getCommandCreator(), 
createCommandDecoder(), commandTransformer(), connection); } catch (MongoCommandException e) { return rethrowIfNotNamespaceError(e, createEmptyBatchCursor(createNamespace(), decoder, source.getServerDescription().getAddress(), batchSize)); @@ -168,8 +159,8 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb binding.getSessionContext()), releasingCallback)) { return; } - createReadCommandAndExecuteAsync(retryState, binding, source, databaseName, getCommandCreator(), createCommandDecoder(), - asyncTransformer(), connection, (result, t) -> { + createReadCommandAndExecuteAsync(clientSideOperationTimeout, retryState, binding, source, databaseName, + getCommandCreator(), createCommandDecoder(), asyncTransformer(), connection, (result, t) -> { if (t != null && !isNamespaceError(t)) { releasingCallback.onResult(null, t); } else { @@ -198,23 +189,15 @@ private CommandReadTransformer> commandTransformer( } private CommandOperationHelper.CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> getCommand(); - } - - private BsonDocument getCommand() { - BsonDocument command = new BsonDocument("listCollections", new BsonInt32(1)) - .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize)); - if (filter != null) { - command.append("filter", filter); - } - if (nameOnly) { - command.append("nameOnly", BsonBoolean.TRUE); - } - if (maxTimeMS > 0) { - command.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } - putIfNotNull(command, "comment", comment); - return command; + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument("listCollections", new BsonInt32(1)) + .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? 
null : batchSize)); + putIfNotNull(command, "filter", filter); + putIfTrue(command, "nameOnly", nameOnly); + putIfNotZero(command, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); + putIfNotNull(command, "comment", comment); + return command; + }; } private Codec createCommandDecoder() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java index bacf64601c9..a32fa5c9639 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java @@ -17,27 +17,25 @@ package com.mongodb.internal.operation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.connection.QueryResult; import com.mongodb.lang.Nullable; -import org.bson.BsonBoolean; import org.bson.BsonDocument; import org.bson.BsonInt32; -import org.bson.BsonInt64; import org.bson.BsonValue; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static 
com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; @@ -49,30 +47,19 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class ListDatabasesOperation implements AsyncReadOperation>, ReadOperation> { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final Decoder decoder; private boolean retryReads; - - private long maxTimeMS; private BsonDocument filter; private Boolean nameOnly; private Boolean authorizedDatabasesOnly; private BsonValue comment; - public ListDatabasesOperation(final Decoder decoder) { + public ListDatabasesOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.decoder = notNull("decoder", decoder); } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public ListDatabasesOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public ListDatabasesOperation filter(@Nullable final BsonDocument filter) { this.filter = filter; return this; @@ -121,13 +108,13 @@ public ListDatabasesOperation comment(@Nullable final BsonValue comment) { @Override public BatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, "admin", getCommandCreator(), + return executeRetryableRead(clientSideOperationTimeout, binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, "databases"), transformer(), retryReads); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { - executeRetryableReadAsync(binding, "admin", getCommandCreator(), + executeRetryableReadAsync(clientSideOperationTimeout, binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, "databases"), asyncTransformer(), retryReads, errorHandlingCallback(callback, LOGGER)); } @@ -147,24 
+134,14 @@ private QueryResult createQueryResult(final BsonDocument result, final Connec } private CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> getCommand(); - } - - private BsonDocument getCommand() { - BsonDocument command = new BsonDocument("listDatabases", new BsonInt32(1)); - if (maxTimeMS > 0) { - command.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } - if (filter != null) { - command.put("filter", filter); - } - if (nameOnly != null) { - command.put("nameOnly", new BsonBoolean(nameOnly)); - } - if (authorizedDatabasesOnly != null) { - command.put("authorizedDatabases", new BsonBoolean(authorizedDatabasesOnly)); - } - putIfNotNull(command, "comment", comment); - return command; + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument("listDatabases", new BsonInt32(1)); + putIfNotNull(command, "filter", filter); + putIfNotNull(command, "nameOnly", nameOnly); + putIfNotNull(command, "authorizedDatabases", authorizedDatabasesOnly); + putIfNotZero(command, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); + putIfNotNull(command, "comment", comment); + return command; + }; } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java index 62ecdc953bd..4426c63147a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java @@ -18,6 +18,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackSupplier; @@ -27,13 +28,11 @@ import com.mongodb.internal.binding.ReadBinding; import 
com.mongodb.lang.Nullable; import org.bson.BsonDocument; -import org.bson.BsonInt64; import org.bson.BsonString; import org.bson.BsonValue; import org.bson.codecs.Codec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.notNull; @@ -50,6 +49,7 @@ import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; import static com.mongodb.internal.operation.CursorHelper.getCursorDocumentFromBatchSize; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.canRetryRead; import static com.mongodb.internal.operation.OperationHelper.createEmptyBatchCursor; @@ -66,14 +66,17 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class ListIndexesOperation implements AsyncReadOperation>, ReadOperation> { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final Decoder decoder; private boolean retryReads; private int batchSize; - private long maxTimeMS; + private BsonValue comment; - public ListIndexesOperation(final MongoNamespace namespace, final Decoder decoder) { + public ListIndexesOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.decoder = notNull("decoder", decoder); } @@ -87,17 +90,6 @@ public ListIndexesOperation batchSize(final int batchSize) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); - } - - public ListIndexesOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public ListIndexesOperation retryReads(final boolean retryReads) { this.retryReads = retryReads; return this; @@ -124,8 +116,8 @@ public BatchCursor execute(final ReadBinding binding) { withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); try { - return createReadCommandAndExecute(retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(), - createCommandDecoder(), transformer(), connection); + return createReadCommandAndExecute(clientSideOperationTimeout, retryState, binding, source, namespace.getDatabaseName(), + getCommandCreator(), createCommandDecoder(), transformer(), connection); } 
catch (MongoCommandException e) { return rethrowIfNotNamespaceError(e, createEmptyBatchCursor(namespace, decoder, source.getServerDescription().getAddress(), batchSize)); @@ -147,8 +139,9 @@ public void executeAsync(final AsyncReadBinding binding, final SingleResultCallb binding.getSessionContext()), releasingCallback)) { return; } - createReadCommandAndExecuteAsync(retryState, binding, source, namespace.getDatabaseName(), getCommandCreator(), - createCommandDecoder(), asyncTransformer(), connection, (result, t) -> { + createReadCommandAndExecuteAsync(clientSideOperationTimeout, retryState, binding, source, + namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(), asyncTransformer(), + connection, (result, t) -> { if (t != null && !isNamespaceError(t)) { releasingCallback.onResult(null, t); } else { @@ -165,17 +158,14 @@ private AsyncBatchCursor emptyAsyncCursor(final AsyncConnectionSource source) } private CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> getCommand(); - } - - private BsonDocument getCommand() { - BsonDocument command = new BsonDocument("listIndexes", new BsonString(namespace.getCollectionName())) - .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize)); - if (maxTimeMS > 0) { - command.put("maxTimeMS", new BsonInt64(maxTimeMS)); - } - putIfNotNull(command, "comment", comment); - return command; + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument("listIndexes", new BsonString(namespace.getCollectionName())) + .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? 
null : batchSize)); + + putIfNotZero(command, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); + putIfNotNull(command, "comment", comment); + return command; + }; } private CommandReadTransformer> transformer() { diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java index 4c471a16bd4..3551462d80d 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java @@ -20,6 +20,7 @@ import com.mongodb.MongoCommandException; import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; @@ -31,12 +32,10 @@ import org.bson.BsonValue; import org.bson.codecs.Decoder; -import java.util.Collections; -import java.util.concurrent.TimeUnit; - import static com.mongodb.internal.operation.AsyncOperationHelper.createEmptyAsyncBatchCursor; import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; import static com.mongodb.internal.operation.OperationHelper.createEmptyBatchCursor; +import static java.util.Collections.singletonList; /** * An operation that lists Atlas Search indexes with the help of {@value #STAGE_LIST_SEARCH_INDEXES} pipeline stage. 
@@ -46,6 +45,7 @@ final class ListSearchIndexesOperation implements AsyncExplainableReadOperation>, ExplainableReadOperation> { private static final String STAGE_LIST_SEARCH_INDEXES = "$listSearchIndexes"; + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final Decoder decoder; @Nullable @@ -56,26 +56,20 @@ final class ListSearchIndexesOperation private final Collation collation; @Nullable private final BsonValue comment; - private final long maxTimeMS; @Nullable private final String indexName; private final boolean retryReads; - ListSearchIndexesOperation(final MongoNamespace namespace, - final Decoder decoder, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable final Collation collation, - @Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse, - final boolean retryReads) { + ListSearchIndexesOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final Decoder decoder, @Nullable final String indexName, @Nullable final Integer batchSize, + @Nullable final Collation collation, @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse, + final boolean retryReads) { + this.clientSideOperationTimeout = clientSideOperationTimeout; this.namespace = namespace; this.decoder = decoder; this.allowDiskUse = allowDiskUse; this.batchSize = batchSize; this.collation = collation; - this.maxTimeMS = maxTimeMS; this.comment = comment; this.indexName = indexName; this.retryReads = retryReads; @@ -125,13 +119,12 @@ private AggregateOperation asAggregateOperation() { BsonDocument searchDefinition = getSearchDefinition(); BsonDocument listSearchIndexesStage = new BsonDocument(STAGE_LIST_SEARCH_INDEXES, searchDefinition); - return new AggregateOperation<>(namespace, Collections.singletonList(listSearchIndexesStage), decoder) + return new 
AggregateOperation<>(clientSideOperationTimeout, namespace, singletonList(listSearchIndexesStage), decoder) .retryReads(retryReads) .collation(collation) .comment(comment) .allowDiskUse(allowDiskUse) - .batchSize(batchSize) - .maxTime(maxTimeMS, TimeUnit.MILLISECONDS); + .batchSize(batchSize); } @NonNull diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java index 482b4261d10..d7da495f96c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java @@ -21,6 +21,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -32,7 +33,6 @@ import org.bson.codecs.BsonDocumentCodec; import java.util.List; -import java.util.concurrent.TimeUnit; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.isTrue; @@ -53,7 +53,6 @@ import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** * Operation that runs a Map Reduce against a MongoDB instance. This operation does not support "inline" results, i.e. 
the results will @@ -66,6 +65,7 @@ */ public class MapReduceToCollectionOperation implements AsyncWriteOperation, WriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final BsonJavaScript mapFunction; private final BsonJavaScript reduceFunction; @@ -78,7 +78,6 @@ private int limit; private boolean jsMode; private boolean verbose; - private long maxTimeMS; private String action = "replace"; private String databaseName; private boolean sharded; @@ -87,14 +86,10 @@ private Collation collation; private static final List VALID_ACTIONS = asList("replace", "merge", "reduce"); - public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, - final BsonJavaScript reduceFunction, final String collectionName) { - this(namespace, mapFunction, reduceFunction, collectionName, null); - } - - public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, - final BsonJavaScript reduceFunction, @Nullable final String collectionName, - @Nullable final WriteConcern writeConcern) { + public MapReduceToCollectionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final BsonJavaScript mapFunction, final BsonJavaScript reduceFunction, @Nullable final String collectionName, + @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.mapFunction = notNull("mapFunction", mapFunction); this.reduceFunction = notNull("reduceFunction", reduceFunction); @@ -185,17 +180,6 @@ public MapReduceToCollectionOperation verbose(final boolean verbose) { return this; } - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, MILLISECONDS); - } - - public MapReduceToCollectionOperation 
maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - public String getAction() { return action; } @@ -295,9 +279,9 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai } private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { - return new CommandReadOperation<>(namespace.getDatabaseName(), - ExplainHelper.asExplainCommand(getCommand(null), explainVerbosity), - new BsonDocumentCodec()); + return new CommandReadOperation<>(clientSideOperationTimeout, namespace.getDatabaseName(), + ExplainHelper.asExplainCommand(getCommand(null), explainVerbosity), + new BsonDocumentCodec()); } private CommandWriteTransformer transformer() { @@ -316,6 +300,7 @@ private CommandWriteTransformerAsync transfor }; } + // TODO this should be a command creator passing in clientside operation timeout private BsonDocument getCommand(@Nullable final ConnectionDescription description) { BsonDocument outputDocument = new BsonDocument(getAction(), new BsonString(getCollectionName())); if (description != null && !serverIsAtLeastVersionFourDotFour(description)) { @@ -336,7 +321,7 @@ private BsonDocument getCommand(@Nullable final ConnectionDescription descriptio putIfNotNull(commandDocument, "scope", getScope()); putIfTrue(commandDocument, "verbose", isVerbose()); putIfNotZero(commandDocument, "limit", getLimit()); - putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS)); + putIfNotZero(commandDocument, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); putIfTrue(commandDocument, "jsMode", isJsMode()); if (bypassDocumentValidation != null && description != null) { commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java 
b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java index 131591dd6e2..4ea2dae3011 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java @@ -20,6 +20,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.client.model.Collation; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncReadBinding; import com.mongodb.internal.binding.ReadBinding; @@ -34,8 +35,6 @@ import org.bson.codecs.BsonDocumentCodec; import org.bson.codecs.Decoder; -import java.util.concurrent.TimeUnit; - import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; @@ -50,7 +49,6 @@ import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION; import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; -import static java.util.concurrent.TimeUnit.MILLISECONDS; /** *

Operation that runs a Map Reduce against a MongoDB instance. This operation only supports "inline" results, i.e. the results will be @@ -62,6 +60,7 @@ */ public class MapReduceWithInlineResultsOperation implements AsyncReadOperation>, ReadOperation> { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace namespace; private final BsonJavaScript mapFunction; private final BsonJavaScript reduceFunction; @@ -73,11 +72,11 @@ public class MapReduceWithInlineResultsOperation implements AsyncReadOperatio private int limit; private boolean jsMode; private boolean verbose; - private long maxTimeMS; private Collation collation; - public MapReduceWithInlineResultsOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, - final BsonJavaScript reduceFunction, final Decoder decoder) { + public MapReduceWithInlineResultsOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final BsonJavaScript mapFunction, final BsonJavaScript reduceFunction, final Decoder decoder) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.mapFunction = notNull("mapFunction", mapFunction); this.reduceFunction = notNull("reduceFunction", reduceFunction); @@ -172,31 +171,18 @@ public MapReduceWithInlineResultsOperation collation(@Nullable final Collatio return this; } - - public long getMaxTime(final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - return timeUnit.convert(maxTimeMS, MILLISECONDS); - } - - - public MapReduceWithInlineResultsOperation maxTime(final long maxTime, final TimeUnit timeUnit) { - notNull("timeUnit", timeUnit); - this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); - return this; - } - - @Override public MapReduceBatchCursor execute(final ReadBinding binding) { - return executeRetryableRead(binding, namespace.getDatabaseName(), 
getCommandCreator(binding.getSessionContext()), + return executeRetryableRead(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, "results"), transformer(), false); } @Override public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); - executeRetryableReadAsync(binding, namespace.getDatabaseName(), getCommandCreator(binding.getSessionContext()), - CommandResultDocumentCodec.create(decoder, "results"), + executeRetryableReadAsync(clientSideOperationTimeout, binding, namespace.getDatabaseName(), + getCommandCreator(binding.getSessionContext()), CommandResultDocumentCodec.create(decoder, "results"), asyncTransformer(), false, errHandlingCallback); } @@ -209,8 +195,8 @@ public AsyncReadOperation asExplainableOperationAsync(final Explai } private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { - return new CommandReadOperation<>(namespace.getDatabaseName(), - asExplainCommand(getCommand(NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), + return new CommandReadOperation<>(clientSideOperationTimeout, namespace.getDatabaseName(), + asExplainCommand(getCommand(clientSideOperationTimeout, NoOpSessionContext.INSTANCE, MIN_WIRE_VERSION), explainVerbosity), new BsonDocumentCodec()); } @@ -225,10 +211,12 @@ private CommandReadTransformerAsync> } private CommandCreator getCommandCreator(final SessionContext sessionContext) { - return (serverDescription, connectionDescription) -> getCommand(sessionContext, connectionDescription.getMaxWireVersion()); + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> + getCommand(clientSideOperationTimeout, sessionContext, connectionDescription.getMaxWireVersion()); } - private BsonDocument getCommand(final SessionContext sessionContext, final int 
maxWireVersion) { + private BsonDocument getCommand(final ClientSideOperationTimeout clientSideOperationTimeout, final SessionContext sessionContext, + final int maxWireVersion) { BsonDocument commandDocument = new BsonDocument("mapreduce", new BsonString(namespace.getCollectionName())) .append("map", getMapFunction()) .append("reduce", getReduceFunction()) @@ -241,7 +229,7 @@ private BsonDocument getCommand(final SessionContext sessionContext, final int m putIfTrue(commandDocument, "verbose", isVerbose()); appendReadConcernToCommand(sessionContext, maxWireVersion, commandDocument); putIfNotZero(commandDocument, "limit", getLimit()); - putIfNotZero(commandDocument, "maxTimeMS", getMaxTime(MILLISECONDS)); + putIfNotZero(commandDocument, "maxTimeMS", clientSideOperationTimeout.getMaxTimeMS()); putIfTrue(commandDocument, "jsMode", isJsMode()); if (collation != null) { commandDocument.put("collation", collation.asDocument()); diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java index fb54fb33994..f579ec64167 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -22,6 +22,7 @@ import com.mongodb.assertions.Assertions; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackLoop; import com.mongodb.internal.async.function.AsyncCallbackRunnable; @@ -76,6 +77,7 @@ */ public class MixedBulkWriteOperation implements AsyncWriteOperation, WriteOperation { private static final FieldNameValidator NO_OP_FIELD_NAME_VALIDATOR = new NoOpFieldNameValidator(); + private final ClientSideOperationTimeout clientSideOperationTimeout; 
private final MongoNamespace namespace; private final List writeRequests; private final boolean ordered; @@ -85,11 +87,13 @@ public class MixedBulkWriteOperation implements AsyncWriteOperation writeRequests, - final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites) { - this.ordered = ordered; + public MixedBulkWriteOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List writeRequests, final boolean ordered, final WriteConcern writeConcern, + final boolean retryWrites) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.namespace = notNull("namespace", namespace); this.writeRequests = notNull("writes", writeRequests); + this.ordered = ordered; this.writeConcern = notNull("writeConcern", writeConcern); this.retryWrites = retryWrites; isTrueArgument("writes is not an empty list", !writeRequests.isEmpty()); diff --git a/driver-core/src/main/com/mongodb/internal/operation/Operations.java b/driver-core/src/main/com/mongodb/internal/operation/Operations.java index f0f6e72b680..e87d4731537 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/Operations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/Operations.java @@ -54,6 +54,7 @@ import com.mongodb.client.model.WriteModel; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.bulk.DeleteRequest; import com.mongodb.internal.bulk.IndexRequest; import com.mongodb.internal.bulk.InsertRequest; @@ -97,10 +98,12 @@ final class Operations { private final WriteConcern writeConcern; private final boolean retryWrites; private final boolean retryReads; + @Nullable + private final Long timeoutMS; Operations(@Nullable final MongoNamespace namespace, final Class documentClass, final ReadPreference 
readPreference, final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, - final boolean retryReads) { + final boolean retryReads, @Nullable final Long timeoutMS) { this.namespace = namespace; this.documentClass = documentClass; this.readPreference = readPreference; @@ -109,6 +112,7 @@ final class Operations { this.writeConcern = writeConcern; this.retryWrites = retryWrites; this.retryReads = retryReads; + this.timeoutMS = timeoutMS; } @Nullable @@ -144,13 +148,19 @@ boolean isRetryReads() { return retryReads; } + @Nullable + public Long getTimeoutMS() { + return timeoutMS; + } + CountDocumentsOperation countDocuments(final Bson filter, final CountOptions options) { - CountDocumentsOperation operation = new CountDocumentsOperation(assertNotNull(namespace)) + + CountDocumentsOperation operation = new CountDocumentsOperation( + ClientSideOperationTimeouts.create(timeoutMS, options.getMaxTime(MILLISECONDS)), assertNotNull(namespace)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .skip(options.getSkip()) .limit(options.getLimit()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .collation(options.getCollation()) .comment(options.getComment()); if (options.getHint() != null) { @@ -162,9 +172,9 @@ CountDocumentsOperation countDocuments(final Bson filter, final CountOptions opt } EstimatedDocumentCountOperation estimatedDocumentCount(final EstimatedDocumentCountOptions options) { - return new EstimatedDocumentCountOperation(assertNotNull(namespace)) + return new EstimatedDocumentCountOperation(ClientSideOperationTimeouts.create(timeoutMS, options.getMaxTime(MILLISECONDS)), + assertNotNull(namespace)) .retryReads(retryReads) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .comment(options.getComment()); } @@ -185,14 +195,14 @@ FindOperation find(final MongoNamespace findNamespace, @Nulla private FindOperation createFindOperation(final MongoNamespace findNamespace, @Nullable 
final Bson filter, final Class resultClass, final FindOptions options) { - FindOperation operation = new FindOperation<>(findNamespace, codecRegistry.get(resultClass)) + FindOperation operation = new FindOperation<>( + ClientSideOperationTimeouts.create(timeoutMS, options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)), + findNamespace, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(filter == null ? new BsonDocument() : filter.toBsonDocument(documentClass, codecRegistry)) .batchSize(options.getBatchSize()) .skip(options.getSkip()) .limit(options.getLimit()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .maxAwaitTime(options.getMaxAwaitTime(MILLISECONDS), MILLISECONDS) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) .cursorType(options.getCursorType()) @@ -216,29 +226,25 @@ private FindOperation createFindOperation(final MongoNamespac return operation; } - DistinctOperation distinct(final String fieldName, @Nullable final Bson filter, - final Class resultClass, final long maxTimeMS, - final Collation collation, final BsonValue comment) { - return new DistinctOperation<>(assertNotNull(namespace), fieldName, codecRegistry.get(resultClass)) + DistinctOperation distinct(final String fieldName, @Nullable final Bson filter, final Class resultClass, + final long maxTimeMS, final Collation collation, final BsonValue comment) { + return new DistinctOperation<>(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), assertNotNull(namespace), + fieldName, codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(filter == null ? 
null : filter.toBsonDocument(documentClass, codecRegistry)) - .maxTime(maxTimeMS, MILLISECONDS) .collation(collation) .comment(comment); - } AggregateOperation aggregate(final List pipeline, final Class resultClass, - final long maxTimeMS, final long maxAwaitTimeMS, @Nullable final Integer batchSize, - final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, - final BsonValue comment, - final Bson variables, final Boolean allowDiskUse, - final AggregationLevel aggregationLevel) { - return new AggregateOperation<>(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)), - codecRegistry.get(resultClass), aggregationLevel) + final long maxTimeMS, final long maxAwaitTimeMS, @Nullable final Integer batchSize, + final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, + final BsonValue comment, + final Bson variables, final Boolean allowDiskUse, + final AggregationLevel aggregationLevel) { + return new AggregateOperation<>(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS, maxAwaitTimeMS), assertNotNull(namespace), + assertNotNull(toBsonDocumentList(pipeline)), codecRegistry.get(resultClass), aggregationLevel) .retryReads(retryReads) - .maxTime(maxTimeMS, MILLISECONDS) - .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS) .allowDiskUse(allowDiskUse) .batchSize(batchSize) .collation(collation) @@ -251,9 +257,8 @@ AggregateToCollectionOperation aggregateToCollection(final List final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { - return new AggregateToCollectionOperation(assertNotNull(namespace), assertNotNull(toBsonDocumentList(pipeline)), - readConcern, writeConcern, aggregationLevel) - .maxTime(maxTimeMS, MILLISECONDS) + return new 
AggregateToCollectionOperation(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), assertNotNull(namespace), + assertNotNull(toBsonDocumentList(pipeline)), readConcern, writeConcern, aggregationLevel) .allowDiskUse(allowDiskUse) .bypassDocumentValidation(bypassDocumentValidation) .collation(collation) @@ -271,11 +276,11 @@ MapReduceToCollectionOperation mapReduceToCollection(final String databaseName, final com.mongodb.client.model.MapReduceAction action, final boolean nonAtomic, final boolean sharded, final Boolean bypassDocumentValidation, final Collation collation) { - MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation(assertNotNull(namespace), - new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), collectionName, writeConcern) + MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation( + ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), assertNotNull(namespace), new BsonJavaScript(mapFunction), + new BsonJavaScript(reduceFunction), collectionName, writeConcern) .filter(toBsonDocument(filter)) .limit(limit) - .maxTime(maxTimeMS, MILLISECONDS) .jsMode(jsMode) .scope(toBsonDocument(scope)) .sort(toBsonDocument(sort)) @@ -294,20 +299,15 @@ MapReduceToCollectionOperation mapReduceToCollection(final String databaseName, } MapReduceWithInlineResultsOperation mapReduce(final String mapFunction, final String reduceFunction, - @Nullable final String finalizeFunction, - final Class resultClass, - final Bson filter, final int limit, - final long maxTimeMS, final boolean jsMode, final Bson scope, - final Bson sort, final boolean verbose, - final Collation collation) { + @Nullable final String finalizeFunction, final Class resultClass, final Bson filter, final int limit, + final long maxTimeMS, final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, + final Collation collation) { MapReduceWithInlineResultsOperation operation = - new 
MapReduceWithInlineResultsOperation<>(assertNotNull(namespace), - new BsonJavaScript(mapFunction), - new BsonJavaScript(reduceFunction), + new MapReduceWithInlineResultsOperation<>(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), + assertNotNull(namespace), new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), codecRegistry.get(resultClass)) .filter(toBsonDocument(filter)) .limit(limit) - .maxTime(maxTimeMS, MILLISECONDS) .jsMode(jsMode) .scope(toBsonDocument(scope)) .sort(toBsonDocument(sort)) @@ -320,11 +320,11 @@ MapReduceWithInlineResultsOperation mapReduce(final String ma } FindAndDeleteOperation findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) { - return new FindAndDeleteOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec()) + return new FindAndDeleteOperation<>(ClientSideOperationTimeouts.create(timeoutMS, options.getMaxTime(MILLISECONDS)), + assertNotNull(namespace), writeConcern, retryWrites, getCodec()) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .collation(options.getCollation()) .hint(options.getHint()) .hintString(options.getHintString()) @@ -334,14 +334,13 @@ FindAndDeleteOperation findOneAndDelete(final Bson filter, final Find FindAndReplaceOperation findOneAndReplace(final Bson filter, final TDocument replacement, final FindOneAndReplaceOptions options) { - return new FindAndReplaceOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(), - documentToBsonDocument(replacement)) + return new FindAndReplaceOperation<>(ClientSideOperationTimeouts.create(timeoutMS, options.getMaxTime(MILLISECONDS)), + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), documentToBsonDocument(replacement)) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) 
.returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .hint(options.getHint()) @@ -351,14 +350,13 @@ FindAndReplaceOperation findOneAndReplace(final Bson filter, final TD } FindAndUpdateOperation findOneAndUpdate(final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { - return new FindAndUpdateOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(), - assertNotNull(toBsonDocument(update))) + return new FindAndUpdateOperation<>(ClientSideOperationTimeouts.create(timeoutMS, options.getMaxTime(MILLISECONDS)), + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocument(update))) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) .sort(toBsonDocument(options.getSort())) .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .arrayFilters(toBsonDocumentList(options.getArrayFilters())) @@ -370,14 +368,13 @@ FindAndUpdateOperation findOneAndUpdate(final Bson filter, final Bson FindAndUpdateOperation findOneAndUpdate(final Bson filter, final List update, final FindOneAndUpdateOptions options) { - return new FindAndUpdateOperation<>(assertNotNull(namespace), writeConcern, retryWrites, getCodec(), - assertNotNull(toBsonDocumentList(update))) + return new FindAndUpdateOperation<>(ClientSideOperationTimeouts.create(timeoutMS, options.getMaxTime(MILLISECONDS)), + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocumentList(update))) .filter(toBsonDocument(filter)) .projection(toBsonDocument(options.getProjection())) 
.sort(toBsonDocument(options.getSort())) .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .arrayFilters(toBsonDocumentList(options.getArrayFilters())) @@ -434,8 +431,7 @@ MixedBulkWriteOperation updateMany(final Bson filter, final List .comment(options.getComment()).let(options.getLet())); } - MixedBulkWriteOperation insertMany(final List documents, - final InsertManyOptions options) { + MixedBulkWriteOperation insertMany(final List documents, final InsertManyOptions options) { notNull("documents", documents); List requests = new ArrayList<>(documents.size()); for (TDocument document : documents) { @@ -448,13 +444,14 @@ MixedBulkWriteOperation insertMany(final List documents, requests.add(new InsertRequest(documentToBsonDocument(document))); } - return new MixedBulkWriteOperation(assertNotNull(namespace), requests, options.isOrdered(), writeConcern, retryWrites) - .bypassDocumentValidation(options.getBypassDocumentValidation()).comment(options.getComment()); + return new MixedBulkWriteOperation(ClientSideOperationTimeouts.create(timeoutMS), assertNotNull(namespace), + requests, options.isOrdered(), writeConcern, retryWrites) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()); } @SuppressWarnings("unchecked") - MixedBulkWriteOperation bulkWrite(final List> requests, - final BulkWriteOptions options) { + MixedBulkWriteOperation bulkWrite(final List> requests, final BulkWriteOptions options) { notNull("requests", requests); List writeRequests = new ArrayList<>(requests.size()); for (WriteModel writeModel : requests) { @@ -469,9 +466,8 @@ MixedBulkWriteOperation bulkWrite(final List replaceOneModel = (ReplaceOneModel) writeModel; - writeRequest = new 
UpdateRequest(assertNotNull(toBsonDocument(replaceOneModel.getFilter())), documentToBsonDocument(replaceOneModel - .getReplacement()), - WriteRequest.Type.REPLACE) + writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(replaceOneModel.getFilter())), + documentToBsonDocument(replaceOneModel.getReplacement()), WriteRequest.Type.REPLACE) .upsert(replaceOneModel.getReplaceOptions().isUpsert()) .collation(replaceOneModel.getReplaceOptions().getCollation()) .hint(replaceOneModel.getReplaceOptions().getHint()) @@ -516,7 +512,8 @@ MixedBulkWriteOperation bulkWrite(final List CommandReadOperation commandRead(final Bson command, final Class resultClass) { notNull("command", command); notNull("resultClass", resultClass); - return new CommandReadOperation<>(assertNotNull(namespace).getDatabaseName(), assertNotNull(toBsonDocument(command)), - codecRegistry.get(resultClass)); + return new CommandReadOperation<>(ClientSideOperationTimeouts.create(timeoutMS), assertNotNull(namespace).getDatabaseName(), + assertNotNull(toBsonDocument(command)), codecRegistry.get(resultClass)); } DropDatabaseOperation dropDatabase() { - return new DropDatabaseOperation(assertNotNull(namespace).getDatabaseName(), getWriteConcern()); + return new DropDatabaseOperation(ClientSideOperationTimeouts.create(timeoutMS), assertNotNull(namespace).getDatabaseName(), + getWriteConcern()); } - CreateCollectionOperation createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { - CreateCollectionOperation operation = new CreateCollectionOperation(assertNotNull(namespace).getDatabaseName(), - collectionName, writeConcern) + CreateCollectionOperation operation = new CreateCollectionOperation(ClientSideOperationTimeouts.create(timeoutMS), + assertNotNull(namespace).getDatabaseName(), collectionName, writeConcern) .collation(createCollectionOptions.getCollation()) 
.capped(createCollectionOptions.isCapped()) .sizeInBytes(createCollectionOptions.getSizeInBytes()) @@ -580,7 +577,8 @@ CreateCollectionOperation createCollection(final String collectionName, final Cr DropCollectionOperation dropCollection( final DropCollectionOptions dropCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { - DropCollectionOperation operation = new DropCollectionOperation(assertNotNull(namespace), writeConcern); + DropCollectionOperation operation = new DropCollectionOperation(ClientSideOperationTimeouts.create(timeoutMS), + assertNotNull(namespace), writeConcern); Bson encryptedFields = dropCollectionOptions.getEncryptedFields(); if (encryptedFields != null) { operation.encryptedFields(assertNotNull(toBsonDocument(encryptedFields))); @@ -597,16 +595,16 @@ DropCollectionOperation dropCollection( RenameCollectionOperation renameCollection(final MongoNamespace newCollectionNamespace, final RenameCollectionOptions renameCollectionOptions) { - return new RenameCollectionOperation(assertNotNull(namespace), newCollectionNamespace, writeConcern) - .dropTarget(renameCollectionOptions.isDropTarget()); + return new RenameCollectionOperation(ClientSideOperationTimeouts.create(timeoutMS), assertNotNull(namespace), + newCollectionNamespace, writeConcern).dropTarget(renameCollectionOptions.isDropTarget()); } CreateViewOperation createView(final String viewName, final String viewOn, final List pipeline, final CreateViewOptions createViewOptions) { notNull("options", createViewOptions); notNull("pipeline", pipeline); - return new CreateViewOperation(assertNotNull(namespace).getDatabaseName(), viewName, viewOn, - assertNotNull(toBsonDocumentList(pipeline)), writeConcern).collation(createViewOptions.getCollation()); + return new CreateViewOperation(ClientSideOperationTimeouts.create(timeoutMS), assertNotNull(namespace).getDatabaseName(), viewName, + viewOn, assertNotNull(toBsonDocumentList(pipeline)), 
writeConcern).collation(createViewOptions.getCollation()); } @SuppressWarnings("deprecation") @@ -641,8 +639,8 @@ CreateIndexesOperation createIndexes(final List indexes, final Creat .hidden(model.getOptions().isHidden()) ); } - return new CreateIndexesOperation(assertNotNull(namespace), indexRequests, writeConcern) - .maxTime(createIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS) + return new CreateIndexesOperation(ClientSideOperationTimeouts.create(timeoutMS, createIndexOptions.getMaxTime(MILLISECONDS)), + assertNotNull(namespace), indexRequests, writeConcern) .commitQuorum(createIndexOptions.getCommitQuorum()); } @@ -668,46 +666,41 @@ DropSearchIndexOperation dropSearchIndex(final String indexName) { } - ListSearchIndexesOperation listSearchIndexes(final Class resultClass, - final long maxTimeMS, - @Nullable final String indexName, - @Nullable final Integer batchSize, - @Nullable final Collation collation, - @Nullable final BsonValue comment, - @Nullable final Boolean allowDiskUse) { - + ListSearchIndexesOperation listSearchIndexes(final Class resultClass, final long maxTimeMS, + @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, + @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { - return new ListSearchIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass), maxTimeMS, - indexName, batchSize, collation, comment, allowDiskUse, retryReads); + return new ListSearchIndexesOperation<>(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), assertNotNull(namespace), + codecRegistry.get(resultClass), indexName, batchSize, collation, comment, allowDiskUse, retryReads); } DropIndexOperation dropIndex(final String indexName, final DropIndexOptions dropIndexOptions) { - return new DropIndexOperation(assertNotNull(namespace), indexName, writeConcern) - .maxTime(dropIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS); + return new 
DropIndexOperation(ClientSideOperationTimeouts.create(timeoutMS, dropIndexOptions.getMaxTime(MILLISECONDS)), + assertNotNull(namespace), indexName, writeConcern); } DropIndexOperation dropIndex(final Bson keys, final DropIndexOptions dropIndexOptions) { - return new DropIndexOperation(assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern) - .maxTime(dropIndexOptions.getMaxTime(MILLISECONDS), MILLISECONDS); + return new DropIndexOperation(ClientSideOperationTimeouts.create(timeoutMS, dropIndexOptions.getMaxTime(MILLISECONDS)), + assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern); } ListCollectionsOperation listCollections(final String databaseName, final Class resultClass, final Bson filter, final boolean collectionNamesOnly, @Nullable final Integer batchSize, final long maxTimeMS, final BsonValue comment) { - return new ListCollectionsOperation<>(databaseName, codecRegistry.get(resultClass)) + return new ListCollectionsOperation<>(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), databaseName, + codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .nameOnly(collectionNamesOnly) .batchSize(batchSize == null ? 
0 : batchSize) - .maxTime(maxTimeMS, MILLISECONDS) .comment(comment); } ListDatabasesOperation listDatabases(final Class resultClass, final Bson filter, final Boolean nameOnly, final long maxTimeMS, final Boolean authorizedDatabasesOnly, final BsonValue comment) { - return new ListDatabasesOperation<>(codecRegistry.get(resultClass)).maxTime(maxTimeMS, MILLISECONDS) + return new ListDatabasesOperation<>(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), codecRegistry.get(resultClass)) .retryReads(retryReads) .filter(toBsonDocument(filter)) .nameOnly(nameOnly) @@ -717,10 +710,10 @@ ListDatabasesOperation listDatabases(final Class res ListIndexesOperation listIndexes(final Class resultClass, @Nullable final Integer batchSize, final long maxTimeMS, final BsonValue comment) { - return new ListIndexesOperation<>(assertNotNull(namespace), codecRegistry.get(resultClass)) + return new ListIndexesOperation<>(ClientSideOperationTimeouts.create(timeoutMS, maxTimeMS), assertNotNull(namespace), + codecRegistry.get(resultClass)) .retryReads(retryReads) .batchSize(batchSize == null ? 
0 : batchSize) - .maxTime(maxTimeMS, MILLISECONDS) .comment(comment); } @@ -729,12 +722,14 @@ ChangeStreamOperation changeStream(final FullDocument fullDoc final Decoder decoder, final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize, final Collation collation, final BsonValue comment, final long maxAwaitTimeMS, final BsonDocument resumeToken, final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { - return new ChangeStreamOperation<>(assertNotNull(namespace), fullDocument, fullDocumentBeforeChange, + return new ChangeStreamOperation<>(ClientSideOperationTimeouts.create(timeoutMS, 0, maxAwaitTimeMS), + assertNotNull(namespace), + fullDocument, + fullDocumentBeforeChange, assertNotNull(toBsonDocumentList(pipeline)), decoder, changeStreamLevel) .batchSize(batchSize) .collation(collation) .comment(comment) - .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS) .resumeAfter(resumeToken) .startAtOperationTime(startAtOperationTime) .startAfter(startAfter) diff --git a/driver-core/src/main/com/mongodb/internal/operation/QueryBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/QueryBatchCursor.java index 139c3e6fd27..b1aac05bd48 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/QueryBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/QueryBatchCursor.java @@ -59,9 +59,9 @@ import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.operation.CursorHelper.getNumberToReturn; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; -import static com.mongodb.internal.operation.SyncOperationHelper.getMoreCursorDocumentToQueryResult; import static com.mongodb.internal.operation.QueryHelper.translateCommandException; import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; +import static 
com.mongodb.internal.operation.SyncOperationHelper.getMoreCursorDocumentToQueryResult; import static java.lang.String.format; import static java.util.Collections.singletonList; diff --git a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java index d6f7ee897ae..83d1fb4c1db 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java @@ -18,6 +18,7 @@ import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -48,17 +49,15 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public class RenameCollectionOperation implements AsyncWriteOperation, WriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final MongoNamespace originalNamespace; private final MongoNamespace newNamespace; private final WriteConcern writeConcern; private boolean dropTarget; - public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace) { - this(originalNamespace, newNamespace, null); - } - - public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace, - @Nullable final WriteConcern writeConcern) { + public RenameCollectionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace originalNamespace, + final MongoNamespace newNamespace, @Nullable final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.originalNamespace = notNull("originalNamespace", originalNamespace); this.newNamespace = notNull("newNamespace", newNamespace); this.writeConcern = writeConcern; diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java index 67d5acf9c37..22de416737a 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java @@ -19,6 +19,7 @@ import com.mongodb.MongoException; import com.mongodb.ReadPreference; import com.mongodb.ServerAddress; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.VisibleForTesting; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.async.function.AsyncCallbackBiFunction; @@ -167,16 +168,19 @@ private static T withConnectionSource(final ConnectionSource source, final C } static T executeRetryableRead( + final 
ClientSideOperationTimeout clientSideOperationTimeout, final ReadBinding binding, final String database, final CommandCreator commandCreator, final Decoder decoder, final CommandReadTransformer transformer, final boolean retryReads) { - return executeRetryableRead(binding, binding::getReadConnectionSource, database, commandCreator, decoder, transformer, retryReads); + return executeRetryableRead(clientSideOperationTimeout, binding, binding::getReadConnectionSource, database, commandCreator, + decoder, transformer, retryReads); } static T executeRetryableRead( + final ClientSideOperationTimeout clientSideOperationTimeout, final ReadBinding binding, final Supplier readConnectionSourceSupplier, final String database, @@ -188,7 +192,8 @@ static T executeRetryableRead( Supplier read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> withSourceAndConnection(readConnectionSourceSupplier, false, (source, connection) -> { retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getSessionContext())); - return createReadCommandAndExecute(retryState, binding, source, database, commandCreator, decoder, transformer, connection); + return createReadCommandAndExecute(clientSideOperationTimeout, retryState, binding, source, database, + commandCreator, decoder, transformer, connection); }) ); return read.get(); @@ -212,6 +217,7 @@ static T executeCommand(final WriteBinding binding, final String database, f } static R executeRetryableWrite( + final ClientSideOperationTimeout clientSideOperationTimeout, final WriteBinding binding, final String database, @Nullable final ReadPreference readPreference, @@ -234,7 +240,7 @@ static R executeRetryableWrite( .map(previousAttemptCommand -> { assertFalse(firstAttempt); return retryCommandModifier.apply(previousAttemptCommand); - }).orElseGet(() -> commandCreator.create(source.getServerDescription(), connection.getDescription())); + }).orElseGet(() -> 
commandCreator.create(clientSideOperationTimeout, source.getServerDescription(), connection.getDescription())); // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true) .attach(AttachmentKeys.retryableCommandFlag(), CommandOperationHelper.isRetryWritesEnabled(command), true) @@ -260,6 +266,7 @@ static R executeRetryableWrite( @Nullable static T createReadCommandAndExecute( + final ClientSideOperationTimeout clientSideOperationTimeout, final RetryState retryState, final ReadBinding binding, final ConnectionSource source, @@ -268,7 +275,7 @@ static T createReadCommandAndExecute( final Decoder decoder, final CommandReadTransformer transformer, final Connection connection) { - BsonDocument command = commandCreator.create(source.getServerDescription(), connection.getDescription()); + BsonDocument command = commandCreator.create(clientSideOperationTimeout, source.getServerDescription(), connection.getDescription()); retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false); return transformer.apply(assertNotNull(connection.command(database, command, new NoOpFieldNameValidator(), source.getReadPreference(), decoder, binding)), source, connection); diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java index 064196d2568..e75428c7c68 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java @@ -65,20 +65,20 @@ public final class SyncOperations { private final Operations operations; public SyncOperations(final Class documentClass, final ReadPreference readPreference, - final CodecRegistry codecRegistry, final boolean retryReads) { - this(null, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, 
WriteConcern.ACKNOWLEDGED, true, retryReads); + final CodecRegistry codecRegistry, final boolean retryReads, @Nullable final Long timeoutMS) { + this(null, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads, timeoutMS); } public SyncOperations(final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, - final CodecRegistry codecRegistry, final boolean retryReads) { - this(namespace, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads); + final CodecRegistry codecRegistry, final boolean retryReads, @Nullable final Long timeoutMS) { + this(namespace, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, retryReads, timeoutMS); } public SyncOperations(@Nullable final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, - final boolean retryWrites, final boolean retryReads) { + final boolean retryWrites, final boolean retryReads, @Nullable final Long timeoutMS) { this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - retryWrites, retryReads); + retryWrites, retryReads, timeoutMS); } public ReadOperation countDocuments(final Bson filter, final CountOptions options) { @@ -111,22 +111,16 @@ public ReadOperation> distinct(final String field } public ExplainableReadOperation> aggregate(final List pipeline, - final Class resultClass, - final long maxTimeMS, final long maxAwaitTimeMS, - @Nullable final Integer batchSize, - final Collation collation, final Bson hint, - final String hintString, - final BsonValue comment, - final Bson variables, - final Boolean allowDiskUse, - final AggregationLevel aggregationLevel) { + final Class resultClass, final long maxTimeMS, final long 
maxAwaitTimeMS, @Nullable final Integer batchSize, + final Collation collation, final Bson hint, final String hintString, final BsonValue comment, final Bson variables, + final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { return operations.aggregate(pipeline, resultClass, maxTimeMS, maxAwaitTimeMS, batchSize, collation, hint, hintString, comment, variables, allowDiskUse, aggregationLevel); } - public ReadOperation aggregateToCollection(final List pipeline, final long maxTimeMS, + public AggregateToCollectionOperation aggregateToCollection(final List pipeline, final long maxTimeMS, final Boolean allowDiskUse, final Boolean bypassDocumentValidation, - final Collation collation, final Bson hint, final String hintString, final BsonValue comment, + final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { return operations.aggregateToCollection(pipeline, maxTimeMS, allowDiskUse, bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel); @@ -226,7 +220,6 @@ public WriteOperation dropDatabase() { return operations.dropDatabase(); } - public WriteOperation createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions, @Nullable final AutoEncryptionSettings autoEncryptionSettings) { return operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings); diff --git a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java index 499623ebcce..9ccc78e1b5c 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java @@ -18,6 +18,7 @@ import com.mongodb.Function; import com.mongodb.WriteConcern; +import 
com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.async.SingleResultCallback; import com.mongodb.internal.binding.AsyncWriteBinding; import com.mongodb.internal.binding.WriteBinding; @@ -42,9 +43,11 @@ *

This class is not part of the public API and may be removed or changed at any time

*/ public abstract class TransactionOperation implements WriteOperation, AsyncWriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final WriteConcern writeConcern; - TransactionOperation(final WriteConcern writeConcern) { + TransactionOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final WriteConcern writeConcern) { + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.writeConcern = notNull("writeConcern", writeConcern); } @@ -55,20 +58,20 @@ public WriteConcern getWriteConcern() { @Override public Void execute(final WriteBinding binding) { isTrue("in transaction", binding.getSessionContext().hasActiveTransaction()); - return executeRetryableWrite(binding, "admin", null, new NoOpFieldNameValidator(), + return executeRetryableWrite(clientSideOperationTimeout, binding, "admin", null, new NoOpFieldNameValidator(), new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformer(), getRetryCommandModifier()); } @Override public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { isTrue("in transaction", binding.getSessionContext().hasActiveTransaction()); - executeRetryableWriteAsync(binding, "admin", null, new NoOpFieldNameValidator(), + executeRetryableWriteAsync(clientSideOperationTimeout, binding, "admin", null, new NoOpFieldNameValidator(), new BsonDocumentCodec(), getCommandCreator(), writeConcernErrorTransformerAsync(), getRetryCommandModifier(), errorHandlingCallback(callback, LOGGER)); } CommandCreator getCommandCreator() { - return (serverDescription, connectionDescription) -> { + return (clientSideOperationTimeout, serverDescription, connectionDescription) -> { BsonDocument command = new BsonDocument(getCommandName(), new BsonInt32(1)); if (!writeConcern.isServerDefault()) { command.put("writeConcern", writeConcern.asDocument()); diff --git 
a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java index 85b4a9cfeac..4dd98e9ba96 100644 --- a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java +++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java @@ -32,6 +32,8 @@ import com.mongodb.connection.StreamFactoryFactory; import com.mongodb.connection.TlsChannelStreamFactoryFactory; import com.mongodb.connection.netty.NettyStreamFactoryFactory; +import com.mongodb.internal.ClientSideOperationTimeout; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.IgnorableRequestContext; import com.mongodb.internal.async.AsyncBatchCursor; import com.mongodb.internal.async.SingleResultCallback; @@ -78,6 +80,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.connection.ClusterConnectionMode.LOAD_BALANCED; @@ -112,7 +115,17 @@ public final class ClusterFixture { private static final String DEFAULT_DATABASE_NAME = "JavaDriverTest"; private static final int COMMAND_NOT_FOUND_ERROR_CODE = 59; public static final long TIMEOUT = 60L; - public static final Duration TIMEOUT_DURATION = Duration.ofMinutes(1); + public static final Duration TIMEOUT_DURATION = Duration.ofSeconds(TIMEOUT); + + public static final Supplier CSOT_NO_TIMEOUT = () -> ClientSideOperationTimeouts.NO_TIMEOUT; + public static final Supplier CSOT_TIMEOUT = + () -> ClientSideOperationTimeouts.create(TIMEOUT_DURATION.toMillis()); + public static final Supplier CSOT_MAX_TIME = () -> + ClientSideOperationTimeouts.create(null, 100, 0, 0); + public static final Supplier CSOT_MAX_AWAIT_TIME = () -> + ClientSideOperationTimeouts.create(null, 0, 101, 0); + public static final Supplier CSOT_MAX_TIME_AND_MAX_AWAIT_TIME = () -> + ClientSideOperationTimeouts.create(null, 101, 1001, 0); public 
static final String LEGACY_HELLO = "isMaster"; private static ConnectionString connectionString; @@ -143,7 +156,7 @@ public static boolean clusterIsType(final ClusterType clusterType) { public static ServerVersion getServerVersion() { if (serverVersion == null) { - serverVersion = getVersion(new CommandReadOperation<>("admin", + serverVersion = getVersion(new CommandReadOperation<>(CSOT_TIMEOUT.get(), "admin", new BsonDocument("buildInfo", new BsonInt32(1)), new BsonDocumentCodec()) .execute(new ClusterBinding(getCluster(), ReadPreference.nearest(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE))); @@ -188,7 +201,8 @@ public static boolean hasEncryptionTestsEnabled() { } public static Document getServerStatus() { - return new CommandReadOperation<>("admin", new BsonDocument("serverStatus", new BsonInt32(1)), new DocumentCodec()) + return new CommandReadOperation<>(CSOT_TIMEOUT.get(), "admin", new BsonDocument("serverStatus", new BsonInt32(1)), + new DocumentCodec()) .execute(getBinding()); } @@ -203,7 +217,7 @@ static class ShutdownHook extends Thread { @Override public void run() { if (cluster != null) { - new DropDatabaseOperation(getDefaultDatabaseName(), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new DropDatabaseOperation(CSOT_TIMEOUT.get(), getDefaultDatabaseName(), WriteConcern.ACKNOWLEDGED).execute(getBinding()); cluster.close(); } } @@ -241,9 +255,10 @@ public static synchronized ConnectionString getConnectionString() { Cluster cluster = createCluster(new ConnectionString(DEFAULT_URI), new SocketStreamFactory(SocketSettings.builder().build(), SslSettings.builder().build())); try { - BsonDocument helloResult = new CommandReadOperation<>("admin", - new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()).execute(new ClusterBinding(cluster, - ReadPreference.nearest(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE)); + BsonDocument helloResult = new 
CommandReadOperation<>(CSOT_TIMEOUT.get(), "admin", + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()) + .execute(new ClusterBinding(cluster, ReadPreference.nearest(), ReadConcern.DEFAULT, getServerApi(), + IgnorableRequestContext.INSTANCE)); if (helloResult.containsKey("setName")) { connectionString = new ConnectionString(DEFAULT_URI + "/?replicaSet=" + helloResult.getString("setName").getValue()); @@ -449,11 +464,7 @@ public static SslSettings getSslSettings(final ConnectionString connectionString public static ServerAddress getPrimary() { List serverDescriptions = getPrimaries(getCluster().getDescription()); while (serverDescriptions.isEmpty()) { - try { - sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + sleep(100); serverDescriptions = getPrimaries(getCluster().getDescription()); } return serverDescriptions.get(0).getAddress(); @@ -462,16 +473,20 @@ public static ServerAddress getPrimary() { public static ServerAddress getSecondary() { List serverDescriptions = getSecondaries(getCluster().getDescription()); while (serverDescriptions.isEmpty()) { - try { - sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + sleep(100); serverDescriptions = getSecondaries(getCluster().getDescription()); } return serverDescriptions.get(0).getAddress(); } + public static void sleep(final int sleepMS) { + try { + Thread.sleep(sleepMS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + @Nullable public static MongoCredential getCredential() { return getConnectionString().getCredential(); @@ -488,9 +503,8 @@ public static MongoCredentialWithCache getCredentialWithCache() { public static BsonDocument getServerParameters() { if (serverParameters == null) { - serverParameters = new CommandReadOperation<>("admin", - new BsonDocument("getParameter", new BsonString("*")), - new BsonDocumentCodec()) + serverParameters = new 
CommandReadOperation<>(CSOT_TIMEOUT.get(), "admin", + new BsonDocument("getParameter", new BsonString("*")), new BsonDocumentCodec()) .execute(getBinding()); } return serverParameters; @@ -555,7 +569,7 @@ public static void configureFailPoint(final BsonDocument failPointDocument) { boolean failsPointsSupported = true; if (!isSharded()) { try { - new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()) + new CommandReadOperation<>(CSOT_TIMEOUT.get(), "admin", failPointDocument, new BsonDocumentCodec()) .execute(getBinding()); } catch (MongoCommandException e) { if (e.getErrorCode() == COMMAND_NOT_FOUND_ERROR_CODE) { @@ -571,7 +585,8 @@ public static void disableFailPoint(final String failPoint) { BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString(failPoint)) .append("mode", new BsonString("off")); try { - new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()).execute(getBinding()); + new CommandReadOperation<>(CSOT_TIMEOUT.get(), "admin", failPointDocument, new BsonDocumentCodec()) + .execute(getBinding()); } catch (MongoCommandException e) { // ignore } @@ -715,7 +730,7 @@ public static int getReferenceCountAfterTimeout(final ReferenceCounted reference if (System.currentTimeMillis() > startTime + 5000) { return count; } - sleep(10); + Thread.sleep(10); count = referenceCounted.getCount(); } catch (InterruptedException e) { throw new MongoInterruptedException("Interrupted", e); diff --git a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy index ddbb9f29a0d..0f81d28c5f8 100644 --- a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy @@ -64,6 +64,7 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit +import static 
com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.TIMEOUT import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget import static com.mongodb.ClusterFixture.executeAsync @@ -109,13 +110,14 @@ class OperationFunctionalSpecification extends Specification { } void acknowledgeWrite(final SingleConnectionBinding binding) { - new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false).execute(binding) + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new InsertRequest(new BsonDocument())], true, + ACKNOWLEDGED, false).execute(binding) binding.release() } void acknowledgeWrite(final AsyncSingleConnectionBinding binding) { - executeAsync(new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], true, ACKNOWLEDGED, false), - binding) + executeAsync(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new InsertRequest(new BsonDocument())], + true, ACKNOWLEDGED, false), binding) binding.release() } diff --git a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java index e2216629a7a..c1657bd60e5 100644 --- a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java +++ b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java @@ -25,6 +25,8 @@ import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.IndexOptionDefaults; import com.mongodb.client.model.ValidationOptions; +import com.mongodb.internal.ClientSideOperationTimeout; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.binding.AsyncReadWriteBinding; import com.mongodb.internal.binding.ReadBinding; import com.mongodb.internal.binding.WriteBinding; @@ -62,8 +64,10 @@ import java.util.ArrayList; import java.util.List; +import 
java.util.function.Supplier; import java.util.stream.Collectors; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.ClusterFixture.executeAsync; import static com.mongodb.ClusterFixture.getBinding; import static java.util.Arrays.asList; @@ -71,6 +75,7 @@ public final class CollectionHelper { + private static final Supplier CSOT_SUPPLIER = () -> ClientSideOperationTimeouts.create(10_000L); private final Codec codec; private final CodecRegistry registry = MongoClientSettings.getDefaultCodecRegistry(); private final MongoNamespace namespace; @@ -81,7 +86,8 @@ public CollectionHelper(final Codec codec, final MongoNamespace namespace) { } public T hello() { - return new CommandReadOperation<>("admin", BsonDocument.parse("{isMaster: 1}"), codec).execute(getBinding()); + return new CommandReadOperation<>(CSOT_SUPPLIER.get(), "admin", BsonDocument.parse("{isMaster: 1}"), codec) + .execute(getBinding()); } public static void drop(final MongoNamespace namespace) { @@ -89,7 +95,7 @@ public static void drop(final MongoNamespace namespace) { } public static void drop(final MongoNamespace namespace, final WriteConcern writeConcern) { - new DropCollectionOperation(namespace, writeConcern).execute(getBinding()); + new DropCollectionOperation(CSOT_SUPPLIER.get(), namespace, writeConcern).execute(getBinding()); } public static void dropDatabase(final String name) { @@ -101,7 +107,7 @@ public static void dropDatabase(final String name, final WriteConcern writeConce return; } try { - new DropDatabaseOperation(name, writeConcern).execute(getBinding()); + new DropDatabaseOperation(CSOT_SUPPLIER.get(), name, writeConcern).execute(getBinding()); } catch (MongoCommandException e) { if (!e.getErrorMessage().contains("ns not found")) { throw e; @@ -135,7 +141,8 @@ public void create(final String collectionName, final CreateCollectionOptions op public void create(final String collectionName, final CreateCollectionOptions options, final WriteConcern 
writeConcern) { drop(namespace, writeConcern); - CreateCollectionOperation operation = new CreateCollectionOperation(namespace.getDatabaseName(), collectionName, writeConcern) + CreateCollectionOperation operation = new CreateCollectionOperation(CSOT_SUPPLIER.get(), namespace.getDatabaseName(), collectionName, + writeConcern) .capped(options.isCapped()) .sizeInBytes(options.getSizeInBytes()) .maxDocuments(options.getMaxDocuments()); @@ -162,7 +169,7 @@ public void killCursor(final MongoNamespace namespace, final ServerCursor server BsonDocument command = new BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))); try { - new CommandReadOperation<>(namespace.getDatabaseName(), command, new BsonDocumentCodec()) + new CommandReadOperation<>(CSOT_SUPPLIER.get(), namespace.getDatabaseName(), command, new BsonDocumentCodec()) .execute(getBinding()); } catch (Exception e) { // Ignore any exceptions killing old cursors @@ -191,7 +198,7 @@ public void insertDocuments(final List documents, final WriteConce for (BsonDocument document : documents) { insertRequests.add(new InsertRequest(document)); } - new MixedBulkWriteOperation(namespace, insertRequests, true, writeConcern, false).execute(binding); + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, insertRequests, true, writeConcern, false).execute(binding); } public void insertDocuments(final Document... 
documents) { @@ -232,7 +239,7 @@ public List find() { } public List find(final Codec codec) { - BatchCursor cursor = new FindOperation<>(namespace, codec) + BatchCursor cursor = new FindOperation<>(CSOT_SUPPLIER.get(), namespace, codec) .sort(new BsonDocument("_id", new BsonInt32(1))) .execute(getBinding()); List results = new ArrayList<>(); @@ -247,7 +254,7 @@ public void updateOne(final Bson filter, final Bson update) { } public void updateOne(final Bson filter, final Bson update, final boolean isUpsert) { - new MixedBulkWriteOperation(namespace, + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), update.toBsonDocument(Document.class, registry), WriteRequest.Type.UPDATE) @@ -257,7 +264,7 @@ public void updateOne(final Bson filter, final Bson update, final boolean isUpse } public void replaceOne(final Bson filter, final Bson update, final boolean isUpsert) { - new MixedBulkWriteOperation(namespace, + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), update.toBsonDocument(Document.class, registry), WriteRequest.Type.REPLACE) @@ -267,7 +274,7 @@ public void replaceOne(final Bson filter, final Bson update, final boolean isUps } public void deleteOne(final Bson filter) { - new MixedBulkWriteOperation(namespace, + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, registry))), true, WriteConcern.ACKNOWLEDGED, false) .execute(getBinding()); @@ -290,11 +297,11 @@ public List aggregateDb(final List pipeline) { } private List aggregate(final List pipeline, final Decoder decoder, final AggregationLevel level) { - List bsonDocumentPipeline = new ArrayList(); + List bsonDocumentPipeline = new ArrayList<>(); for (Bson cur : pipeline) { bsonDocumentPipeline.add(cur.toBsonDocument(Document.class, registry)); } - 
BatchCursor cursor = new AggregateOperation(namespace, bsonDocumentPipeline, decoder, level) + BatchCursor cursor = new AggregateOperation<>(CSOT_SUPPLIER.get(), namespace, bsonDocumentPipeline, decoder, level) .execute(getBinding()); List results = new ArrayList<>(); while (cursor.hasNext()) { @@ -329,8 +336,8 @@ public List find(final BsonDocument filter, final BsonDocument sort, fina } public List find(final BsonDocument filter, final BsonDocument sort, final BsonDocument projection, final Decoder decoder) { - BatchCursor cursor = new FindOperation<>(namespace, decoder).filter(filter).sort(sort).projection(projection) - .execute(getBinding()); + BatchCursor cursor = new FindOperation<>(CSOT_SUPPLIER.get(), namespace, decoder).filter(filter).sort(sort) + .projection(projection).execute(getBinding()); List results = new ArrayList<>(); while (cursor.hasNext()) { results.addAll(cursor.next()); @@ -343,15 +350,16 @@ public long count() { } public long count(final ReadBinding binding) { - return new CountDocumentsOperation(namespace).execute(binding); + return new CountDocumentsOperation(CSOT_SUPPLIER.get(), namespace).execute(binding); } public long count(final AsyncReadWriteBinding binding) throws Throwable { - return executeAsync(new CountDocumentsOperation(namespace), binding); + return executeAsync(new CountDocumentsOperation(CSOT_SUPPLIER.get(), namespace), binding); } public long count(final Bson filter) { - return new CountDocumentsOperation(namespace).filter(toBsonDocument(filter)).execute(getBinding()); + return new CountDocumentsOperation(CSOT_SUPPLIER.get(), namespace) + .filter(toBsonDocument(filter)).execute(getBinding()); } public BsonDocument wrap(final Document document) { @@ -363,37 +371,42 @@ public BsonDocument toBsonDocument(final Bson document) { } public void createIndex(final BsonDocument key) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new 
CreateIndexesOperation(CSOT_SUPPLIER.get(), namespace, singletonList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED) + .execute(getBinding()); } public void createIndex(final Document key) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(CSOT_SUPPLIER.get(), namespace, singletonList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED) + .execute(getBinding()); } public void createUniqueIndex(final Document key) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key)).unique(true)), WriteConcern.ACKNOWLEDGED) + new CreateIndexesOperation(CSOT_SUPPLIER.get(), namespace, singletonList(new IndexRequest(wrap(key)).unique(true)), + WriteConcern.ACKNOWLEDGED) .execute(getBinding()); } public void createIndex(final Document key, final String defaultLanguage) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)), - WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(CSOT_SUPPLIER.get(), namespace, + singletonList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)), WriteConcern.ACKNOWLEDGED).execute(getBinding()); } public void createIndex(final Bson key) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(key.toBsonDocument(Document.class, registry))), - WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(CSOT_SUPPLIER.get(), namespace, + singletonList(new IndexRequest(key.toBsonDocument(Document.class, registry))), WriteConcern.ACKNOWLEDGED).execute(getBinding()); } @SuppressWarnings("deprecation") public void createIndex(final Bson key, final Double bucketSize) { - new CreateIndexesOperation(namespace, asList(new IndexRequest(key.toBsonDocument(Document.class, registry)) - .bucketSize(bucketSize)), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + new CreateIndexesOperation(CSOT_SUPPLIER.get(), 
namespace, + singletonList(new IndexRequest(key.toBsonDocument(Document.class, registry)).bucketSize(bucketSize)), + WriteConcern.ACKNOWLEDGED).execute(getBinding()); } public List listIndexes(){ List indexes = new ArrayList<>(); - BatchCursor cursor = new ListIndexesOperation<>(namespace, new BsonDocumentCodec()).execute(getBinding()); + BatchCursor cursor = new ListIndexesOperation<>(CSOT_SUPPLIER.get(), namespace, new BsonDocumentCodec()) + .execute(getBinding()); while (cursor.hasNext()) { indexes.addAll(cursor.next()); } @@ -402,8 +415,8 @@ public List listIndexes(){ public void killAllSessions() { try { - new CommandReadOperation<>("admin", new BsonDocument("killAllSessions", new BsonArray()), - new BsonDocumentCodec()).execute(getBinding()); + new CommandReadOperation<>(CSOT_SUPPLIER.get(), "admin", + new BsonDocument("killAllSessions", new BsonArray()), new BsonDocumentCodec()).execute(getBinding()); } catch (MongoCommandException e) { // ignore exception caused by killing the implicit session that the killAllSessions command itself is running in } @@ -411,10 +424,9 @@ public void killAllSessions() { public void renameCollection(final MongoNamespace newNamespace) { try { - new CommandReadOperation<>("admin", + new CommandReadOperation<>(CSOT_SUPPLIER.get(), "admin", new BsonDocument("renameCollection", new BsonString(getNamespace().getFullName())) - .append("to", new BsonString(newNamespace.getFullName())), - new BsonDocumentCodec()).execute(getBinding()); + .append("to", new BsonString(newNamespace.getFullName())), new BsonDocumentCodec()).execute(getBinding()); } catch (MongoCommandException e) { // do nothing } @@ -425,10 +437,12 @@ public void runAdminCommand(final String command) { } public void runAdminCommand(final BsonDocument command) { - new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(getBinding()); + new CommandReadOperation<>(CSOT_SUPPLIER.get(), "admin", command, new BsonDocumentCodec()) + .execute(getBinding()); } 
public void runAdminCommand(final BsonDocument command, final ReadPreference readPreference) { - new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(getBinding(readPreference)); + new CommandReadOperation<>(CSOT_SUPPLIER.get(), "admin", command, new BsonDocumentCodec()) + .execute(getBinding(readPreference)); } } diff --git a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy index d75d6ef489e..c91008e0c8b 100644 --- a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy @@ -23,6 +23,7 @@ import org.bson.BsonDocument import org.bson.BsonInt32 import org.bson.codecs.BsonDocumentCodec +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.LEGACY_HELLO import static com.mongodb.connection.ConnectionDescription.getDefaultMaxMessageSize @@ -65,7 +66,7 @@ class ConnectionSpecification extends OperationFunctionalSpecification { source?.release() } private static BsonDocument getHelloResult() { - new CommandReadOperation('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), - new BsonDocumentCodec()).execute(getBinding()) + new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), + new BsonDocumentCodec()).execute(getBinding()) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy index 44205922a0a..94f7422d724 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy +++ 
b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy @@ -35,6 +35,7 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import spock.lang.Specification +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.createAsyncCluster import static com.mongodb.ClusterFixture.createCluster import static com.mongodb.ClusterFixture.getBinding @@ -88,14 +89,14 @@ class ScramSha256AuthenticationSpecification extends Specification { .append('pwd', password) .append('roles', ['root']) .append('mechanisms', mechanisms) - new CommandReadOperation<>('admin', + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocumentWrapper(createUserCommand, new DocumentCodec()), new DocumentCodec()) .execute(getBinding()) } def dropUser(final String userName) { - new CommandReadOperation<>('admin', new BsonDocument('dropUser', new BsonString(userName)), - new BsonDocumentCodec()).execute(getBinding()) + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocument('dropUser', new BsonString(userName)), + new BsonDocumentCodec()).execute(getBinding()) } def 'test authentication and authorization'() { @@ -103,7 +104,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def cluster = createCluster(credential) when: - new CommandReadOperation('admin', + new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE)) @@ -125,7 +126,7 @@ class ScramSha256AuthenticationSpecification extends Specification { when: // make this synchronous - new CommandReadOperation('admin', + new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new 
DocumentCodec()) .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE), @@ -147,7 +148,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def cluster = createCluster(credential) when: - new CommandReadOperation('admin', + new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE)) @@ -168,7 +169,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def callback = new FutureResultCallback() when: - new CommandReadOperation('admin', + new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE), callback) @@ -189,7 +190,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def cluster = createCluster(credential) when: - new CommandReadOperation('admin', + new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, getServerApi(), IgnorableRequestContext.INSTANCE)) @@ -210,7 +211,7 @@ class ScramSha256AuthenticationSpecification extends Specification { def callback = new FutureResultCallback() when: - new CommandReadOperation('admin', + new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'admin', new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, 
getServerApi(), IgnorableRequestContext.INSTANCE), callback) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy index fe7cd511c0c..aea23549530 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy @@ -21,6 +21,7 @@ import org.bson.BsonDocument import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.WriteConcern.ACKNOWLEDGED import static com.mongodb.WriteConcern.MAJORITY @@ -33,13 +34,13 @@ class AbortTransactionOperationSpecification extends OperationFunctionalSpecific def expectedCommand = BsonDocument.parse('{abortTransaction: 1}') when: - def operation = new AbortTransactionOperation(ACKNOWLEDGED) + def operation = new AbortTransactionOperation(CSOT_NO_TIMEOUT.get(), ACKNOWLEDGED) then: testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult) when: - operation = new AbortTransactionOperation(MAJORITY) + operation = new AbortTransactionOperation(CSOT_NO_TIMEOUT.get(), MAJORITY) expectedCommand.put('writeConcern', MAJORITY.asDocument()) then: @@ -56,14 +57,14 @@ class AbortTransactionOperationSpecification extends OperationFunctionalSpecific when: def writeConcern = MAJORITY.withWTimeout(10, TimeUnit.MILLISECONDS) - def operation = new AbortTransactionOperation(writeConcern) + def operation = new AbortTransactionOperation(CSOT_NO_TIMEOUT.get(), writeConcern) then: testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) when: writeConcern = MAJORITY - operation = new AbortTransactionOperation(writeConcern) + operation = new AbortTransactionOperation(CSOT_NO_TIMEOUT.get(), 
writeConcern) expectedCommand.put('writeConcern', writeConcern.asDocument()) then: @@ -71,7 +72,7 @@ class AbortTransactionOperationSpecification extends OperationFunctionalSpecific when: writeConcern = ACKNOWLEDGED - operation = new AbortTransactionOperation(writeConcern) + operation = new AbortTransactionOperation(CSOT_NO_TIMEOUT.get(), writeConcern) expectedCommand.remove('writeConcern') then: diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy index 1a01d4dd926..7e0260b5511 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation - import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification @@ -52,6 +51,9 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import static QueryOperationHelper.getKeyPattern +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT import static com.mongodb.ClusterFixture.collectCursorResults import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint @@ -68,8 +70,6 @@ import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static 
java.util.concurrent.TimeUnit.SECONDS class AggregateOperationSpecification extends OperationFunctionalSpecification { @@ -82,14 +82,12 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should have the correct defaults'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) then: operation.getAllowDiskUse() == null operation.getBatchSize() == null operation.getCollation() == null - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 operation.getPipeline() == [] } @@ -98,27 +96,23 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def hint = BsonDocument.parse('{a: 1}') when: - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) .allowDiskUse(true) .batchSize(10) .collation(defaultCollation) .hint(hint) - .maxAwaitTime(10, MILLISECONDS) - .maxTime(10, MILLISECONDS) then: operation.getAllowDiskUse() operation.getBatchSize() == 10 operation.getCollation() == defaultCollation - operation.getMaxAwaitTime(MILLISECONDS) == 10 - operation.getMaxTime(MILLISECONDS) == 10 operation.getHint() == hint } def 'should throw when using invalid hint'() { given: def hint = new BsonString('ok') - def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).hint(hint) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()).hint(hint) when: operation.getHint() @@ -142,19 +136,27 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should create the expected command'() { when: def pipeline = [new BsonDocument('$match', new BsonDocument('a', new 
BsonString('A')))] - def operation = new AggregateOperation(helper.namespace, pipeline, new DocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, pipeline, new DocumentCodec()) + + def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) + .append('pipeline', new BsonArray(pipeline)) + .append('cursor', new BsonDocument()) + + then: + testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) + + when: + operation = new AggregateOperation(CSOT_MAX_TIME.get(), helper.namespace, pipeline, new DocumentCodec()) .allowDiskUse(true) .batchSize(10) .collation(defaultCollation) - .maxAwaitTime(15, MILLISECONDS) - .maxTime(10, MILLISECONDS) - def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) + expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) .append('pipeline', new BsonArray(pipeline)) .append('allowDiskUse', new BsonBoolean(true)) .append('collation', defaultCollation.asDocument()) .append('cursor', new BsonDocument('batchSize', new BsonInt32(10))) - .append('maxTimeMS', new BsonInt32(10)) + .append('maxTimeMS', new BsonInt64(100)) then: testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) @@ -169,7 +171,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def document = BsonDocument.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}')] - def operation = new AggregateOperation(namespace, pipeline, new BsonDocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), namespace, pipeline, new BsonDocumentCodec()) .collation(caseInsensitiveCollation) when: @@ -187,7 +189,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { given: def expected = 
[createExpectedChangeNotification(namespace, 0), createExpectedChangeNotification(namespace, 1)] def pipeline = ['{$changeStream: {}}', '{$project: {"_id.clusterTime": 0, "_id.uuid": 0}}'].collect { BsonDocument.parse(it) } - def operation = new AggregateOperation(namespace, pipeline, new BsonDocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), namespace, pipeline, new BsonDocumentCodec()) def helper = getCollectionHelper() when: @@ -214,7 +216,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should be able to aggregate'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) def batchCursor = execute(operation, async) def results = collectCursorResults(batchCursor)*.getString('name') @@ -232,11 +234,11 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def viewSuffix = '-view' def viewName = getCollectionName() + viewSuffix def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) - new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], WriteConcern.ACKNOWLEDGED) + new CreateViewOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), viewName, getCollectionName(), [], WriteConcern.ACKNOWLEDGED) .execute(getBinding(getCluster())) when: - AggregateOperation operation = new AggregateOperation(viewNamespace, [], new DocumentCodec()) + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), viewNamespace, [], new DocumentCodec()) def batchCursor = execute(operation, async) def results = collectCursorResults(batchCursor)*.getString('name') @@ -245,7 +247,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { results.containsAll(['Pete', 'Sam']) cleanup: - new DropCollectionOperation(viewNamespace, 
WriteConcern.ACKNOWLEDGED).execute(getBinding(getCluster())) + new DropCollectionOperation(CSOT_TIMEOUT.get(), viewNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding(getCluster())) where: async << [true, false] @@ -253,7 +255,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should be able to aggregate with pipeline'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber')))], new DocumentCodec()) def batchCursor = execute(operation, async) def results = collectCursorResults(batchCursor)*.getString('name') @@ -268,7 +270,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should allow disk usage'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).allowDiskUse(allowDiskUse) + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) + .allowDiskUse(allowDiskUse) def cursor = operation.execute(getBinding()) then: @@ -280,7 +283,8 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should allow batch size'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).batchSize(batchSize) + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) + .batchSize(batchSize) def cursor = operation.execute(getBinding()) then: @@ -293,7 +297,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { @IgnoreIf({ isSharded() }) def 'should throw execution timeout exception from execute'() { given: - def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()).maxTime(1, SECONDS) + def operation = new 
AggregateOperation(csot.get(), getNamespace(), [], new DocumentCodec()) enableMaxTimeFailPoint() when: @@ -306,13 +310,13 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { disableMaxTimeFailPoint() where: - async << [true, false] + [async, csot] << [[true, false], [CSOT_MAX_TIME, CSOT_TIMEOUT]].combinations() } @IgnoreIf({ serverVersionLessThan(3, 6) }) def 'should be able to explain an empty pipeline'() { given: - def operation = new AggregateOperation(getNamespace(), [], new BsonDocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new BsonDocumentCodec()) operation = async ? operation.asAsyncExplainableOperation(QUERY_PLANNER, new BsonDocumentCodec()) : operation.asExplainableOperation(QUERY_PLANNER, new BsonDocumentCodec()) @@ -329,7 +333,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should be able to aggregate with collation'() { when: - AggregateOperation operation = new AggregateOperation(getNamespace(), + AggregateOperation operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [BsonDocument.parse('{$match: {job : "plumber"}}')], new DocumentCodec() ).collation(options) def batchCursor = execute(operation, async) @@ -350,7 +354,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def index = new BsonDocument('a', new BsonInt32(1)) collectionHelper.createIndex(index) - def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) .hint(hint) when: @@ -368,10 +372,10 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new 
CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' - def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) .comment(new BsonString(expectedComment)) when: @@ -382,34 +386,14 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { ((Document) profileDocument.get('command')).get('comment') == expectedComment cleanup: - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new BsonDocumentCodec()).execute(getBinding()) profileCollectionHelper.drop() where: async << [true, false] } - @IgnoreIf({ isSharded() || serverVersionLessThan(3, 2) }) - def 'should be able to respect maxTime with pipeline'() { - given: - enableMaxTimeFailPoint() - AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) - .maxTime(10, MILLISECONDS) - - when: - execute(operation, async) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() - - where: - async << [true, false] - } - def 'should add read concern to command'() { given: def binding = Stub(ReadBinding) @@ -427,7 +411,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { .append('cursor', new BsonDocument()) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + def 
operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) when: operation.execute(binding) @@ -469,7 +453,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { .append('cursor', new BsonDocument()) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], new DocumentCodec()) when: executeAsync(operation, binding) @@ -497,7 +481,7 @@ class AggregateOperationSpecification extends OperationFunctionalSpecification { def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new AggregateOperation(helper.namespace, [], new BsonDocumentCodec()) + def operation = new AggregateOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, [], new BsonDocumentCodec()) then: testOperationSecondaryOk(operation, [2, 6, 0], readPreference, async, helper.cursorResult) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy index a7aa377e855..f059d400fe3 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy @@ -29,6 +29,8 @@ import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.Filters import com.mongodb.client.model.ValidationOptions import com.mongodb.client.test.CollectionHelper +import com.mongodb.internal.ClientSideOperationTimeout +import com.mongodb.internal.client.model.AggregationLevel import org.bson.BsonArray import org.bson.BsonBoolean import org.bson.BsonDocument @@ -40,17 +42,16 
@@ import org.bson.codecs.BsonValueCodecProvider import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.WriteConcern.ACKNOWLEDGED import static com.mongodb.client.model.Filters.gte -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders class AggregateToCollectionOperationSpecification extends OperationFunctionalSpecification { @@ -71,11 +72,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), pipeline, ACKNOWLEDGED) then: operation.getAllowDiskUse() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getPipeline() == pipeline operation.getBypassDocumentValidation() == null operation.getWriteConcern() == ACKNOWLEDGED @@ -87,15 +87,13 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), 
pipeline, WriteConcern.MAJORITY) + AggregateToCollectionOperation operation = createOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), pipeline, WriteConcern.MAJORITY) .allowDiskUse(true) - .maxTime(10, MILLISECONDS) .bypassDocumentValidation(true) .collation(defaultCollation) then: operation.getAllowDiskUse() - operation.getMaxTime(MILLISECONDS) == 10 operation.getBypassDocumentValidation() == true operation.getWriteConcern() == WriteConcern.MAJORITY operation.getCollation() == defaultCollation @@ -106,15 +104,13 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] when: - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ReadConcern.DEFAULT) + AggregateToCollectionOperation operation = createOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), pipeline, ReadConcern.DEFAULT) .allowDiskUse(true) - .maxTime(10, MILLISECONDS) .bypassDocumentValidation(true) .collation(defaultCollation) then: operation.getAllowDiskUse() - operation.getMaxTime(MILLISECONDS) == 10 operation.getBypassDocumentValidation() == true operation.getReadConcern() == ReadConcern.DEFAULT operation.getCollation() == defaultCollation @@ -122,7 +118,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should not accept an empty pipeline'() { when: - new AggregateToCollectionOperation(getNamespace(), [], ACKNOWLEDGED) + createOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], ACKNOWLEDGED) then: @@ -131,10 +127,9 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should be able to output to a collection'() { when: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], - ACKNOWLEDGED) + 
AggregateToCollectionOperation operation = createOperation(CSOT_TIMEOUT.get(), getNamespace(), + [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], + ACKNOWLEDGED) execute(operation, async) then: @@ -147,9 +142,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe @IgnoreIf({ serverVersionLessThan(4, 2) }) def 'should be able to merge into a collection'() { when: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))]) + AggregateToCollectionOperation operation = createOperation(CSOT_TIMEOUT.get(), getNamespace(), + [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))]) execute(operation, async) then: @@ -161,11 +155,9 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should be able to match then output to a collection'() { when: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), - new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], - ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(CSOT_TIMEOUT.get(), getNamespace(), + [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), + new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], ACKNOWLEDGED) execute(operation, async) then: @@ -177,12 +169,10 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should throw execution timeout exception from execute'() { given: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), - [new BsonDocument('$match', new BsonDocument('job', 
new BsonString('plumber'))), - new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], - ACKNOWLEDGED) - .maxTime(1, SECONDS) + AggregateToCollectionOperation operation = createOperation(CSOT_TIMEOUT.get(), getNamespace(), + [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), + new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], + ACKNOWLEDGED) enableMaxTimeFailPoint() when: @@ -201,13 +191,12 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() }) def 'should throw on write concern error'() { given: - AggregateToCollectionOperation operation = - new AggregateToCollectionOperation(getNamespace(), + AggregateToCollectionOperation operation =createOperation(CSOT_TIMEOUT.get(), getNamespace(), [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], new WriteConcern(5)) when: - async ? 
executeAsync(operation) : operation.execute(getBinding()) + execute(operation, async) then: def ex = thrown(MongoWriteConcernException) @@ -227,8 +216,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe getCollectionHelper().insertDocuments(BsonDocument.parse('{ level: 9 }')) when: - def operation = new AggregateToCollectionOperation(getNamespace(), [BsonDocument.parse('{$out: "collectionOut"}')], - ACKNOWLEDGED) + AggregateToCollectionOperation operation = createOperation(CSOT_TIMEOUT.get(), getNamespace(), + [BsonDocument.parse('{$out: "collectionOut"}')], ACKNOWLEDGED) execute(operation, async) then: @@ -256,7 +245,8 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should create the expected command'() { when: def pipeline = [BsonDocument.parse('{$out: "collectionOut"}')] - def operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ReadConcern.MAJORITY, WriteConcern.MAJORITY) + AggregateToCollectionOperation operation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), pipeline, + ReadConcern.MAJORITY, WriteConcern.MAJORITY) .bypassDocumentValidation(true) def expectedCommand = new BsonDocument('aggregate', new BsonString(getNamespace().getCollectionName())) .append('pipeline', new BsonArray(pipeline)) @@ -298,7 +288,7 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe getCollectionHelper().insertDocuments(BsonDocument.parse('{_id: 1, str: "foo"}')) def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}'), new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] - def operation = new AggregateToCollectionOperation(getNamespace(), pipeline, ACKNOWLEDGED).collation(defaultCollation) + AggregateToCollectionOperation operation = createOperation(CSOT_TIMEOUT.get(), getNamespace(), pipeline, ACKNOWLEDGED) .collation(caseInsensitiveCollation) when: @@ -315,10 +305,10 @@ class 
AggregateToCollectionOperationSpecification extends OperationFunctionalSpe def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' - def operation = new AggregateToCollectionOperation(getNamespace(), + AggregateToCollectionOperation operation = createOperation(CSOT_TIMEOUT.get(), getNamespace(), [Aggregates.out('outputCollection').toBsonDocument(BsonDocument, registry)], ACKNOWLEDGED) .comment(new BsonString(expectedComment)) @@ -330,11 +320,27 @@ class AggregateToCollectionOperationSpecification extends OperationFunctionalSpe ((Document) profileDocument.get('command')).get('comment') == expectedComment cleanup: - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new BsonDocumentCodec()).execute(getBinding()) profileCollectionHelper.drop() where: async << [true, false] } + + def createOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline) { + new AggregateToCollectionOperation(clientSideOperationTimeout, namespace, pipeline, null, null, AggregationLevel.COLLECTION) + } + + def createOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, final WriteConcern writeConcern) { + new AggregateToCollectionOperation(clientSideOperationTimeout, namespace, pipeline, null, writeConcern, 
AggregationLevel.COLLECTION) + } + + def createOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final List pipeline, final ReadConcern readConcern) { + new AggregateToCollectionOperation(clientSideOperationTimeout, namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION) + } + } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy index f403d4b053b..8d5ac08d265 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy @@ -33,6 +33,7 @@ import org.bson.Document import org.bson.codecs.BsonDocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getCluster import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -53,7 +54,7 @@ class ChangeStreamOperationProseTestSpecification extends OperationFunctionalSpe given: def helper = getHelper() def pipeline = [BsonDocument.parse('{$project: {"_id": 0}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -90,7 +91,7 @@ class ChangeStreamOperationProseTestSpecification extends OperationFunctionalSpe def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] def failPointDocument = createFailPointDocument('getMore', 10107) - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def 
operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -123,7 +124,7 @@ class ChangeStreamOperationProseTestSpecification extends OperationFunctionalSpe def 'should not resume for aggregation errors'() { given: def pipeline = [BsonDocument.parse('{$unsupportedStage: {_id: 0}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy index 2325061b25b..f618a17b4a8 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy @@ -52,6 +52,7 @@ import org.bson.codecs.DocumentCodec import org.bson.codecs.ValueCodecProvider import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.getAsyncCluster import static com.mongodb.ClusterFixture.getCluster import static com.mongodb.ClusterFixture.isStandalone @@ -60,7 +61,6 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.client.model.changestream.ChangeStreamDocument.createCodec import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion -import static java.util.concurrent.TimeUnit.MILLISECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders 
@IgnoreIf({ !(serverVersionAtLeast(3, 6) && !isStandalone()) }) @@ -68,32 +68,29 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should have the correct defaults'() { when: - ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), FullDocument.DEFAULT, + ChangeStreamOperation operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) then: operation.getBatchSize() == null operation.getCollation() == null operation.getFullDocument() == FullDocument.DEFAULT - operation.getMaxAwaitTime(MILLISECONDS) == 0 operation.getPipeline() == [] operation.getStartAtOperationTime() == null } def 'should set optional values correctly'() { when: - ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), FullDocument.UPDATE_LOOKUP, - FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) + ChangeStreamOperation operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) .batchSize(5) .collation(defaultCollation) - .maxAwaitTime(15, MILLISECONDS) .startAtOperationTime(new BsonTimestamp(99)) then: operation.getBatchSize() == 5 operation.getCollation() == defaultCollation operation.getFullDocument() == FullDocument.UPDATE_LOOKUP - operation.getMaxAwaitTime(MILLISECONDS) == 15 operation.getStartAtOperationTime() == new BsonTimestamp(99) } @@ -114,11 +111,10 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio .append('cursor', new BsonDocument('id', new BsonInt64(0)).append('ns', new BsonString('db.coll')) .append('firstBatch', new BsonArrayWrapper([]))) - def operation = new ChangeStreamOperation(namespace, FullDocument.DEFAULT, - FullDocumentBeforeChange.DEFAULT, pipeline, new DocumentCodec(), changeStreamLevel) + def operation = new 
ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), namespace, FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, new DocumentCodec(), changeStreamLevel as ChangeStreamLevel) .batchSize(5) .collation(defaultCollation) - .maxAwaitTime(15, MILLISECONDS) .startAtOperationTime(new BsonTimestamp()) def expectedCommand = new BsonDocument('aggregate', aggregate) @@ -151,7 +147,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -191,7 +187,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) @@ -218,7 +214,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "update"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) 
@@ -246,7 +242,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "replace"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -274,7 +270,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "delete"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -302,7 +298,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "invalidate"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -331,7 +327,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 
helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "drop"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -360,7 +356,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "dropDatabase"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider())), ChangeStreamLevel.DATABASE) @@ -390,8 +386,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "rename"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, - FullDocumentBeforeChange.DEFAULT, pipeline, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) def newNamespace = new MongoNamespace('JavaDriverTest', 'newCollectionName') helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) @@ -419,7 +415,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio given: def helper = getHelper() 
def pipeline = [BsonDocument.parse('{$project: {"_id": 0}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -443,7 +439,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -472,7 +468,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) when: @@ -513,7 +509,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -549,7 +545,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), 
FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -586,7 +582,7 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def helper = getHelper() def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) def cursor = execute(operation, async) @@ -619,8 +615,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio def 'should support hasNext on the sync API'() { given: def helper = getHelper() - def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange - .DEFAULT, [], CODEC) + def operation = new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) when: def cursor = execute(operation, false) @@ -660,7 +656,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio } when: 'set resumeAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .resumeAfter(new BsonDocument()) .execute(binding) @@ -669,7 +666,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio !changeStream.containsKey('startAtOperationTime') when: 'set startAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new 
ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAfter(new BsonDocument()) .execute(binding) @@ -679,7 +677,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio when: 'set startAtOperationTime' def startAtTime = new BsonTimestamp(42) - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAtOperationTime(startAtTime) .execute(binding) @@ -717,7 +716,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio } when: 'set resumeAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .resumeAfter(new BsonDocument()) .executeAsync(binding, Stub(SingleResultCallback)) @@ -726,7 +726,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio !changeStream.containsKey('startAtOperationTime') when: 'set startAfter' - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAfter(new BsonDocument()) .executeAsync(binding, Stub(SingleResultCallback)) @@ -736,7 +737,8 @@ class ChangeStreamOperationSpecification extends OperationFunctionalSpecificatio when: 'set startAtOperationTime' def startAtTime = new BsonTimestamp(42) - new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], CODEC) + new 
ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) .startAtOperationTime(startAtTime) .executeAsync(binding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy index 3d99ac477a4..6720dc6fb76 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation -import util.spock.annotations.Slow import com.mongodb.MongoExecutionTimeoutException import com.mongodb.OperationFunctionalSpecification import org.bson.BsonBinary @@ -25,93 +24,75 @@ import org.bson.BsonInt32 import org.bson.BsonString import org.bson.codecs.BsonDocumentCodec import spock.lang.IgnoreIf +import util.spock.annotations.Slow +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.executeAsync -import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isSharded class CommandOperationSpecification extends OperationFunctionalSpecification { def 'should execute read command'() { given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())), - new BsonDocumentCodec()) + def operation = new CommandReadOperation(CSOT_NO_TIMEOUT.get(), getNamespace().databaseName, + new BsonDocument('count', new BsonString(getCollectionName())), + new BsonDocumentCodec()) when: - def result = 
commandOperation.execute(getBinding()) + def result = execute(operation, async) then: result.getNumber('n').intValue() == 0 - } - def 'should execute read command asynchronously'() { - given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())), - new BsonDocumentCodec()) - when: - def result = executeAsync(commandOperation) - - then: - result.getNumber('n').intValue() == 0 + where: + async << [true, false] } + @Slow def 'should execute command larger than 16MB'() { - when: - def result = new CommandReadOperation<>(getNamespace().databaseName, - new BsonDocument('findAndModify', new BsonString(getNamespace().fullName)) - .append('query', new BsonDocument('_id', new BsonInt32(42))) - .append('update', - new BsonDocument('_id', new BsonInt32(42)) - .append('b', new BsonBinary( - new byte[16 * 1024 * 1024 - 30]))), - new BsonDocumentCodec()) - .execute(getBinding()) - - then: - result.containsKey('value') - } - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())) - .append('maxTimeMS', new BsonInt32(1)), - new BsonDocumentCodec()) - enableMaxTimeFailPoint() + def operation = new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getNamespace().databaseName, + new BsonDocument('findAndModify', new BsonString(getNamespace().fullName)) + .append('query', new BsonDocument('_id', new BsonInt32(42))) + .append('update', + new BsonDocument('_id', new BsonInt32(42)) + .append('b', new BsonBinary( + new byte[16 * 1024 * 1024 - 30]))), + new BsonDocumentCodec()) when: - commandOperation.execute(getBinding()) + def result = execute(operation, async) then: - thrown(MongoExecutionTimeoutException) + result.containsKey('value') - cleanup: - disableMaxTimeFailPoint() + where: + async << [true, false] } - 
@IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { + def 'should throw execution timeout exception from execute'() { given: - def commandOperation = new CommandReadOperation(getNamespace().databaseName, - new BsonDocument('count', new BsonString(getCollectionName())) - .append('maxTimeMS', new BsonInt32(1)), - new BsonDocumentCodec()) + def operation = new CommandReadOperation(CSOT_MAX_TIME.get(), getNamespace().databaseName, + new BsonDocument('count', new BsonString(getCollectionName())) + .append('maxTimeMS', new BsonInt32(99)), // TODO - JAVA-5098 determine the correct course of action here. + new BsonDocumentCodec()) enableMaxTimeFailPoint() when: - executeAsync(commandOperation) + execute(operation, async) then: thrown(MongoExecutionTimeoutException) cleanup: disableMaxTimeFailPoint() + + where: + async << [true, false] } + } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy index 0d91963d5bf..145511909d7 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy @@ -21,6 +21,7 @@ import org.bson.BsonDocument import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.WriteConcern.ACKNOWLEDGED import static com.mongodb.WriteConcern.MAJORITY @@ -33,13 +34,13 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi def expectedCommand = BsonDocument.parse('{commitTransaction: 1}') when: - def operation = new CommitTransactionOperation(ACKNOWLEDGED) + def operation = new CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), ACKNOWLEDGED) then: 
testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult) when: - operation = new CommitTransactionOperation(MAJORITY) + operation = new CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), MAJORITY) expectedCommand.put('writeConcern', MAJORITY.asDocument()) then: @@ -56,14 +57,14 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi when: def writeConcern = MAJORITY.withWTimeout(10, TimeUnit.MILLISECONDS) - def operation = new CommitTransactionOperation(writeConcern) + def operation = new CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), writeConcern) then: testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) when: writeConcern = MAJORITY - operation = new CommitTransactionOperation(writeConcern) + operation = new CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), writeConcern) expectedCommand.put('writeConcern', writeConcern.withWTimeout(10000, TimeUnit.MILLISECONDS).asDocument()) then: @@ -71,7 +72,7 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi when: writeConcern = ACKNOWLEDGED - operation = new CommitTransactionOperation(writeConcern) + operation = new CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), writeConcern) expectedCommand.put('writeConcern', writeConcern.withW('majority').withWTimeout(10000, TimeUnit.MILLISECONDS).asDocument()) then: @@ -87,7 +88,7 @@ class CommitTransactionOperationSpecification extends OperationFunctionalSpecifi def expectedCommand = BsonDocument.parse('{commitTransaction: 1, writeConcern: {w: "majority", wtimeout: 10000}}') when: - def operation = new CommitTransactionOperation(ACKNOWLEDGED, true) + def operation = new CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), ACKNOWLEDGED, true) then: testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy 
b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy index c308e115ca8..c3bffe18e51 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy @@ -27,6 +27,7 @@ import com.mongodb.connection.ClusterId import com.mongodb.connection.ConnectionDescription import com.mongodb.connection.ConnectionId import com.mongodb.connection.ServerId +import com.mongodb.internal.ClientSideOperationTimeouts import com.mongodb.internal.binding.AsyncConnectionSource import com.mongodb.internal.binding.AsyncReadBinding import com.mongodb.internal.binding.ConnectionSource @@ -45,6 +46,8 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync @@ -53,8 +56,7 @@ import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS + class CountDocumentsOperationSpecification extends OperationFunctionalSpecification { @@ -73,11 +75,10 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should have the correct defaults'() { when: - CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace()) + CountDocumentsOperation operation = new 
CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()) then: operation.getFilter() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getHint() == null operation.getLimit() == 0 operation.getSkip() == 0 @@ -89,8 +90,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def hint = new BsonString('hint') when: - CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace()) - .maxTime(10, MILLISECONDS) + CountDocumentsOperation operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()) .filter(filter) .hint(hint) .limit(20) @@ -98,7 +98,6 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat then: operation.getFilter() == filter - operation.getMaxTime(MILLISECONDS) == 10 operation.getHint() == hint operation.getLimit() == 20 operation.getSkip() == 30 @@ -106,7 +105,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should get the count'() { expect: - execute(new CountDocumentsOperation(getNamespace()), async) == documents.size() + execute(new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()), async) == documents.size() where: async << [true, false] @@ -117,7 +116,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat getCollectionHelper().drop() then: - execute(new CountDocumentsOperation(getNamespace()), async) == 0 + execute(new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()), async) == 0 where: async << [true, false] @@ -129,7 +128,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat getCollectionHelper().create() then: - execute(new CountDocumentsOperation(getNamespace()), async) == 0 + execute(new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()), async) == 0 where: async << [true, false] @@ -137,7 +136,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should throw 
execution timeout exception from execute'() { given: - def operation = new CountDocumentsOperation(getNamespace()).maxTime(1, SECONDS) + def operation = new CountDocumentsOperation(ClientSideOperationTimeouts.create(1_000), getNamespace()) enableMaxTimeFailPoint() when: @@ -155,7 +154,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use limit with the count'() { when: - def operation = new CountDocumentsOperation(getNamespace()).limit(1) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()).limit(1) then: execute(operation, async) == 1 @@ -166,7 +165,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use skip with the count'() { when: - def operation = new CountDocumentsOperation(getNamespace()).skip(documents.size() - 2) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()).skip(documents.size() - 2) then: execute(operation, async) @@ -179,9 +178,9 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use hint with the count'() { given: def indexDefinition = new BsonDocument('y', new BsonInt32(1)) - new CreateIndexesOperation(getNamespace(), [new IndexRequest(indexDefinition).sparse(true)]) + new CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new IndexRequest(indexDefinition).sparse(true)], null) .execute(getBinding()) - def operation = new CountDocumentsOperation(getNamespace()).hint(indexDefinition) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()).hint(indexDefinition) when: def count = execute(operation, async) @@ -196,7 +195,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ !serverVersionAtLeast(3, 6) }) def 'should support hints that are bson documents or strings'() { expect: - execute(new CountDocumentsOperation(getNamespace()).hint(hint), async) == 5 + 
execute(new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()).hint(hint), async) == 5 where: [async, hint] << [[true, false], [new BsonString('_id_'), BsonDocument.parse('{_id: 1}')]].combinations() @@ -204,7 +203,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should throw with bad hint'() { given: - def operation = new CountDocumentsOperation(getNamespace()) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()) .filter(new BsonDocument('a', new BsonInt32(1))) .hint(new BsonString('BAD HINT')) @@ -220,7 +219,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new CountDocumentsOperation(helper.namespace) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), helper.namespace) .filter(BsonDocument.parse('{a: 1}')) then: @@ -233,7 +232,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should create the expected aggregation command'() { when: def filter = new BsonDocument('filter', new BsonInt32(1)) - def operation = new CountDocumentsOperation(helper.namespace) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), helper.namespace) def pipeline = [BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')] def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) .append('pipeline', new BsonArray(pipeline)) @@ -243,11 +242,11 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) when: - operation.filter(filter) + operation = new CountDocumentsOperation(CSOT_MAX_TIME.get(), helper.namespace) + .filter(filter) .limit(20) .skip(30) .hint(hint) - .maxTime(10, MILLISECONDS) 
.collation(defaultCollation) expectedCommand = expectedCommand @@ -255,7 +254,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat new BsonDocument('$skip', new BsonInt64(30)), new BsonDocument('$limit', new BsonInt64(20)), pipeline.last()])) - .append('maxTimeMS', new BsonInt32(10)) + .append('maxTimeMS', new BsonInt64(100)) .append('collation', defaultCollation.asDocument()) .append('hint', hint) @@ -270,7 +269,8 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat def 'should support collation'() { given: getCollectionHelper().insertDocuments(BsonDocument.parse('{str: "foo"}')) - def operation = new CountDocumentsOperation(namespace).filter(BsonDocument.parse('{str: "FOO"}')) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), namespace) + .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) when: @@ -301,7 +301,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat .append('cursor', new BsonDocument()) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new CountDocumentsOperation(getNamespace()) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()) when: operation.execute(binding) @@ -341,7 +341,7 @@ class CountDocumentsOperationSpecification extends OperationFunctionalSpecificat .append('cursor', new BsonDocument()) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new CountDocumentsOperation(getNamespace()) + def operation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), getNamespace()) when: executeAsync(operation, binding) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy index 2d3bdf962f4..6cda8d45a8b 100644 --- 
a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy @@ -29,6 +29,7 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.serverVersionAtLeast @@ -38,7 +39,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should have the correct defaults'() { when: - CreateCollectionOperation operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + CreateCollectionOperation operation = createOperation() then: !operation.isCapped() @@ -61,7 +62,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def validator = BsonDocument.parse('{ level: { $gte : 10 }}') when: - CreateCollectionOperation operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + CreateCollectionOperation operation = createOperation() .autoIndex(false) .capped(true) .sizeInBytes(1000) @@ -91,7 +92,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific assert !collectionNameExists(getCollectionName()) when: - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() execute(operation, async) then: @@ -108,14 +109,14 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific if (serverVersionLessThan(4, 2)) { storageEngineOptions.append('mmapv1', new BsonDocument()) } - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .storageEngineOptions(storageEngineOptions) when: 
execute(operation, async) then: - new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find { + new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() }.getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions @@ -130,14 +131,14 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific if (serverVersionLessThan(4, 2)) { storageEngineOptions.append('mmapv1', new BsonDocument()) } - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .storageEngineOptions(storageEngineOptions) when: execute(operation, async) then: - new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find { + new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocumentCodec()).execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() }.getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions @@ -148,7 +149,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should create capped collection'() { given: assert !collectionNameExists(getCollectionName()) - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .capped(true) .maxDocuments(100) .sizeInBytes(40 * 1024) @@ -160,7 +161,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific collectionNameExists(getCollectionName()) when: - def stats = new CommandReadOperation<>(getDatabaseName(), + def stats = new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('collStats', new BsonString(getCollectionName())), new 
BsonDocumentCodec()).execute(getBinding()) @@ -179,14 +180,14 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should create collection in respect to the autoIndex option'() { given: assert !collectionNameExists(getCollectionName()) - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .autoIndex(autoIndex) when: execute(operation, async) then: - new CommandReadOperation<>(getDatabaseName(), + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('collStats', new BsonString(getCollectionName())), new DocumentCodec()).execute(getBinding()) .getInteger('nindexes') == expectedNumberOfIndexes @@ -204,7 +205,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific given: assert !collectionNameExists(getCollectionName()) def indexOptionDefaults = BsonDocument.parse('{ storageEngine: { wiredTiger : {} }}') - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .indexOptionDefaults(indexOptionDefaults) when: @@ -223,7 +224,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific given: assert !collectionNameExists(getCollectionName()) def validator = BsonDocument.parse('{ level: { $gte : 10 }}') - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()) + def operation = createOperation() .validator(validator) .validationLevel(ValidationLevel.MODERATE) .validationAction(ValidationAction.ERROR) @@ -252,7 +253,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific def 'should throw on write concern error'() { given: assert !collectionNameExists(getCollectionName()) - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName(), new WriteConcern(5)) + def operation = createOperation(new WriteConcern(5)) when: execute(operation, 
async) @@ -269,7 +270,7 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should be able to create a collection with a collation'() { given: - def operation = new CreateCollectionOperation(getDatabaseName(), getCollectionName()).collation(defaultCollation) + def operation = createOperation().collation(defaultCollation) when: execute(operation, async) @@ -284,11 +285,19 @@ class CreateCollectionOperationSpecification extends OperationFunctionalSpecific } def getCollectionInfo(String collectionName) { - new ListCollectionsOperation(databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', + new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', new BsonString(collectionName))).execute(getBinding()).tryNext()?.head() } def collectionNameExists(String collectionName) { getCollectionInfo(collectionName) != null } + + def createOperation() { + createOperation(null) + } + + def createOperation(WriteConcern writeConcern) { + new CreateCollectionOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), getCollectionName(), writeConcern) + } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy index ee0725a9bde..968e56279f0 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy @@ -24,6 +24,7 @@ import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoWriteConcernException import com.mongodb.OperationFunctionalSpecification import com.mongodb.WriteConcern +import com.mongodb.internal.ClientSideOperationTimeout import com.mongodb.internal.bulk.IndexRequest 
import org.bson.BsonBoolean import org.bson.BsonDocument @@ -35,6 +36,8 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.getBinding @@ -53,14 +56,13 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should get index names'() { when: - def createIndexOperation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field1', new BsonInt32(1))), - new IndexRequest(new BsonDocument('field2', new BsonInt32(-1))), - new IndexRequest(new BsonDocument('field3', new BsonInt32(1)) - .append('field4', new BsonInt32(-1))), - new IndexRequest(new BsonDocument('field5', new BsonInt32(-1))) - .name('customName') - ]) + def createIndexOperation = createOperation([new IndexRequest(new BsonDocument('field1', new BsonInt32(1))), + new IndexRequest(new BsonDocument('field2', new BsonInt32(-1))), + new IndexRequest(new BsonDocument('field3', new BsonInt32(1)) + .append('field4', new BsonInt32(-1))), + new IndexRequest(new BsonDocument('field5', new BsonInt32(-1))) + .name('customName') + ]) then: createIndexOperation.indexNames == ['field1_1', 'field2_-1', 'field3_1_field4_-1', 'customName'] } @@ -68,7 +70,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a single index'() { given: def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) when: execute(operation, async) @@ -84,7 +86,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should throw execution 
timeout exception from execute'() { given: def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]).maxTime(30, SECONDS) + def operation = createOperation(CSOT_MAX_TIME.get(), [new IndexRequest(keys)]) enableMaxTimeFailPoint() @@ -105,7 +107,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should throw exception if commit quorum is set where server < 4.4'() { given: def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) .commitQuorum(CreateIndexCommitQuorum.MAJORITY) when: @@ -124,7 +126,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def keys = new BsonDocument('field', new BsonInt32(1)) when: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) .commitQuorum(quorum) then: @@ -144,7 +146,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a single index with a BsonInt64'() { given: def keys = new BsonDocument('field', new BsonInt64(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) when: execute(operation, async) @@ -160,8 +162,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati given: def keysForFirstIndex = new BsonDocument('field', new BsonInt32(1)) def keysForSecondIndex = new BsonDocument('field2', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keysForFirstIndex), - new IndexRequest(keysForSecondIndex)]) + def operation = createOperation([new IndexRequest(keysForFirstIndex), + new IndexRequest(keysForSecondIndex)]) 
when: execute(operation, async) @@ -176,7 +178,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a single index on a nested field'() { given: def keys = new BsonDocument('x.y', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)]) + def operation = createOperation([new IndexRequest(keys)]) when: execute(operation, async) @@ -191,8 +193,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to handle duplicate key errors when indexing'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), x1, x1) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true)]) + def operation = createOperation([new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true)]) when: execute(operation, async) @@ -208,8 +209,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should drop duplicates'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), x1, x1) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true).dropDups(true)]) + def operation = createOperation([new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true).dropDups(true)]) when: execute(operation, async) @@ -223,7 +223,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should throw when trying to build an invalid index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument())]) + def operation = createOperation([new IndexRequest(new BsonDocument())]) when: execute(operation, async) @@ -237,8 +237,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a unique 
index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -248,8 +247,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).unique(true)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).unique(true)]) execute(operation, async) then: @@ -261,7 +259,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a sparse index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -271,8 +269,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).sparse(true)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).sparse(true)]) execute(operation, async) then: @@ -284,8 +281,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a TTL indexes'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -295,8 +291,7 @@ class 
CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).expireAfter(100, SECONDS)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).expireAfter(100, SECONDS)]) execute(operation, async) then: @@ -309,8 +304,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a 2d indexes'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('2d')))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d')))]) when: execute(operation, async) @@ -320,8 +314,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('2d'))).bits(2).min(1.0).max(2.0)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d'))).bits(2).min(1.0).max(2.0)]) execute(operation, async) then: @@ -337,9 +330,10 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionAtLeast(5, 0) }) def 'should be able to create a geoHaystack indexes'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('geoHaystack')).append('field1', new BsonInt32(1))) - .bucketSize(10.0)]) + def operation = createOperation( + [new IndexRequest(new BsonDocument('field', new BsonString('geoHaystack')) + .append('field1', new BsonInt32(1))) + .bucketSize(10.0)]) when: execute(operation, async) @@ -354,8 +348,7 @@ class CreateIndexesOperationSpecification extends 
OperationFunctionalSpecificati def 'should be able to create a 2dSphereIndex'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('2dsphere')))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere')))]) when: execute(operation, async) @@ -369,8 +362,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a 2dSphereIndex with version 1'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('2dsphere'))).sphereVersion(1)]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere'))).sphereVersion(1)]) when: execute(operation, async) @@ -385,11 +377,10 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a textIndex'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('text'))) - .defaultLanguage('es') - .languageOverride('language') - .weights(new BsonDocument('field', new BsonInt32(100)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text'))) + .defaultLanguage('es') + .languageOverride('language') + .weights(new BsonDocument('field', new BsonInt32(100)))]) when: execute(operation, async) @@ -406,8 +397,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a textIndexVersion'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('text')))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text')))]) when: execute(operation, async) @@ -421,8 +411,7 @@ class 
CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a textIndexVersion with version 1'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonString('text'))).textVersion(1)]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text'))).textVersion(1)]) when: execute(operation, async) @@ -438,9 +427,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should pass through storage engine options'() { given: def storageEngineOptions = new Document('wiredTiger', new Document('configString', 'block_compressor=zlib')) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('a', new BsonInt32(1))) - .storageEngine(new BsonDocumentWrapper(storageEngineOptions, new DocumentCodec()))]) + def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1))) + .storageEngine(new BsonDocumentWrapper(storageEngineOptions, new DocumentCodec()))]) when: execute(operation, async) @@ -456,9 +444,9 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should be able to create a partially filtered index'() { given: def partialFilterExpression = new Document('a', new Document('$gte', 10)) - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))) - .partialFilterExpression(new BsonDocumentWrapper(partialFilterExpression, new DocumentCodec()))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))) + .partialFilterExpression(new BsonDocumentWrapper(partialFilterExpression, + new DocumentCodec()))]) when: execute(operation, async) @@ -474,7 +462,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati def 'should throw on write concern error'() { given: 
def keys = new BsonDocument('field', new BsonInt32(1)) - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)], new WriteConcern(5)) + def operation = new CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new IndexRequest(keys)], new WriteConcern(5)) when: execute(operation, async) @@ -491,8 +479,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should be able to create an index with collation'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('a', new BsonInt32(1))).collation(defaultCollation)]) + def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1))).collation(defaultCollation)]) when: execute(operation, async) @@ -509,9 +496,8 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(4, 2) }) def 'should be able to create wildcard indexes'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('$**', new BsonInt32(1))), - new IndexRequest(new BsonDocument('tags.$**', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1))), + new IndexRequest(new BsonDocument('tags.$**', new BsonInt32(1)))]) when: execute(operation, async) @@ -527,9 +513,9 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(4, 2) }) def 'should be able to create wildcard index with projection'() { given: - def operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('$**', new BsonInt32(1))) - .wildcardProjection(new BsonDocument('a', BsonBoolean.TRUE).append('_id', BsonBoolean.FALSE))]) + def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1))) + .wildcardProjection(new 
BsonDocument('a', BsonBoolean.TRUE).append('_id', + BsonBoolean.FALSE))]) when: execute(operation, async) @@ -545,7 +531,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati @IgnoreIf({ serverVersionLessThan(4, 4) }) def 'should be able to set hidden index'() { given: - def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) when: execute(operation, async) @@ -555,8 +541,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati when: getCollectionHelper().drop(getNamespace()) - operation = new CreateIndexesOperation(getNamespace(), - [new IndexRequest(new BsonDocument('field', new BsonInt32(1))).hidden(true)]) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).hidden(true)]) execute(operation, async) then: @@ -574,7 +559,7 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati List getIndexes() { def indexes = [] - def cursor = new ListIndexesOperation(getNamespace(), new DocumentCodec()).execute(getBinding()) + def cursor = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()).execute(getBinding()) while (cursor.hasNext()) { indexes.addAll(cursor.next()) } @@ -589,4 +574,12 @@ class CreateIndexesOperationSpecification extends OperationFunctionalSpecificati getUserCreatedIndexes()*.get(keyname).findAll { it != null } } + def createOperation(final List requests) { + createOperation(CSOT_NO_TIMEOUT.get(), requests) + } + + def createOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final List requests) { + new CreateIndexesOperation(clientSideOperationTimeout, getNamespace(), requests, null) + } + } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy 
b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy index 87fc13aaa31..9dc816dcc44 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy @@ -29,6 +29,7 @@ import org.bson.BsonString import org.bson.codecs.BsonDocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.serverVersionAtLeast @@ -51,7 +52,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification getCollectionHelper().insertDocuments([trueXDocument, falseXDocument]) def pipeline = [new BsonDocument('$match', trueXDocument)] - def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, pipeline, WriteConcern.ACKNOWLEDGED) + def operation = new CreateViewOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), viewName, viewOn, pipeline, + WriteConcern.ACKNOWLEDGED) when: execute(operation, async) @@ -79,7 +81,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification assert !collectionNameExists(viewOn) assert !collectionNameExists(viewName) - def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, [], WriteConcern.ACKNOWLEDGED) + def operation = new CreateViewOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), viewName, viewOn, [], + WriteConcern.ACKNOWLEDGED) .collation(defaultCollation) when: @@ -100,7 +103,7 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification @IgnoreIf({ serverVersionAtLeast(3, 4) }) def 'should throw if server version is not 3.4 or greater'() { given: - def operation = new CreateViewOperation(getDatabaseName(), getCollectionName() + '-view', + def 
operation = new CreateViewOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), getCollectionName() + '-view', getCollectionName(), [], WriteConcern.ACKNOWLEDGED) when: @@ -120,7 +123,8 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) assert !collectionNameExists(viewName) - def operation = new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], new WriteConcern(5)) + def operation = new CreateViewOperation(CSOT_NO_TIMEOUT.get(), getDatabaseName(), viewName, getCollectionName(), [], + new WriteConcern(5)) when: execute(operation, async) @@ -138,7 +142,7 @@ class CreateViewOperationSpecification extends OperationFunctionalSpecification } def getCollectionInfo(String collectionName) { - new ListCollectionsOperation(databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', + new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', new BsonString(collectionName))).execute(getBinding()).tryNext()?.head() } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy index 40f707ccf1b..e6ffcf9c188 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy @@ -53,6 +53,8 @@ import org.bson.codecs.ValueCodecProvider import org.bson.types.ObjectId import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync 
@@ -60,8 +62,6 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders class DistinctOperationSpecification extends OperationFunctionalSpecification { @@ -76,11 +76,10 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should have the correct defaults'() { when: - DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + DistinctOperation operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'name', stringDecoder) then: operation.getFilter() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getCollation() == null } @@ -89,14 +88,12 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def filter = new BsonDocument('filter', new BsonInt32(1)) when: - DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) - .maxTime(10, MILLISECONDS) + DistinctOperation operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'name', stringDecoder) .filter(filter) .collation(defaultCollation) then: operation.getFilter() == filter - operation.getMaxTime(MILLISECONDS) == 10 operation.getCollation() == defaultCollation } @@ -106,7 +103,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { Document sam = new Document('name', 'Sam').append('age', 21) Document pete2 = new Document('name', 'Pete').append('age', 25) getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) - DistinctOperation operation = new 
DistinctOperation(getNamespace(), 'name', stringDecoder) + DistinctOperation operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'name', stringDecoder) when: def results = executeAndCollectBatchCursorResults(operation, async) @@ -124,7 +121,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { Document sam = new Document('name', 'Sam').append('age', 21) Document pete2 = new Document('name', 'Pete').append('age', 25) getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) - def operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + def operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'name', stringDecoder) .filter(new BsonDocument('age', new BsonInt32(25))) when: @@ -155,7 +152,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { .append('numberOfJobs', sam.numberOfJobs) getCollectionHelper().insertDocuments(new Document('worker', peteDocument), new Document('worker', samDocument)) - DistinctOperation operation = new DistinctOperation(getNamespace(), 'worker', new WorkerCodec()) + DistinctOperation operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'worker', new WorkerCodec()) when: def results = executeAndCollectBatchCursorResults(operation, async) @@ -174,7 +171,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { Document sam = new Document('name', 1) Document pete2 = new Document('name', new Document('earle', 'Jones')) getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) - DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + DistinctOperation operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'name', stringDecoder) when: execute(operation, async) @@ -188,7 +185,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should throw execution 
timeout exception from execute'() { given: - def operation = new DistinctOperation(getNamespace(), 'name', stringDecoder).maxTime(1, SECONDS) + def operation = new DistinctOperation(CSOT_MAX_TIME.get(), getNamespace(), 'name', stringDecoder) enableMaxTimeFailPoint() when: @@ -206,7 +203,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new DistinctOperation(helper.namespace, 'name', helper.decoder) + def operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, 'name', helper.decoder) then: testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.commandResult) @@ -217,15 +214,14 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { def 'should create the expected command'() { when: - def operation = new DistinctOperation(helper.namespace, 'name', new BsonDocumentCodec()) + def operation = new DistinctOperation(CSOT_MAX_TIME.get(), helper.namespace, 'name', new BsonDocumentCodec()) .filter(new BsonDocument('a', BsonBoolean.TRUE)) - .maxTime(10, MILLISECONDS) .collation(defaultCollation) def expectedCommand = new BsonDocument('distinct', new BsonString(helper.namespace.getCollectionName())) .append('key', new BsonString('name')) .append('query', operation.getFilter()) - .append('maxTimeMS', new BsonInt64(operation.getMaxTime(MILLISECONDS))) + .append('maxTimeMS', new BsonInt64(100)) .append('collation', defaultCollation.asDocument()) then: @@ -240,7 +236,8 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { given: def document = Document.parse('{str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new DistinctOperation(namespace, 'str', stringDecoder).filter(BsonDocument.parse('{str: "FOO"}}')) + def operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), namespace, 'str', stringDecoder) + 
.filter(BsonDocument.parse('{str: "FOO"}}')) .collation(caseInsensitiveCollation) when: @@ -269,7 +266,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { .append('key', new BsonString('str')) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new DistinctOperation(getNamespace(), 'str', new StringCodec()) + def operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'str', new StringCodec()) when: operation.execute(binding) @@ -308,7 +305,7 @@ class DistinctOperationSpecification extends OperationFunctionalSpecification { .append('key', new BsonString('str')) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new DistinctOperation(getNamespace(), 'str', new StringCodec()) + def operation = new DistinctOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'str', new StringCodec()) when: executeAsync(operation, binding) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy index 0c293ed58b0..bf850b0fc13 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy @@ -24,6 +24,8 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -37,7 +39,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat assert collectionNameExists(getCollectionName()) 
when: - new DropCollectionOperation(getNamespace()).execute(getBinding()) + new DropCollectionOperation(CSOT_TIMEOUT.get(), getNamespace(), WriteConcern.ACKNOWLEDGED).execute(getBinding()) then: !collectionNameExists(getCollectionName()) @@ -50,7 +52,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat assert collectionNameExists(getCollectionName()) when: - executeAsync(new DropCollectionOperation(getNamespace())) + executeAsync(new DropCollectionOperation(CSOT_TIMEOUT.get(), getNamespace(), WriteConcern.ACKNOWLEDGED)) then: !collectionNameExists(getCollectionName()) @@ -61,7 +63,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection') when: - new DropCollectionOperation(namespace).execute(getBinding()) + new DropCollectionOperation(CSOT_TIMEOUT.get(), namespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) then: !collectionNameExists('nonExistingCollection') @@ -73,7 +75,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection') when: - executeAsync(new DropCollectionOperation(namespace)) + executeAsync(new DropCollectionOperation(CSOT_TIMEOUT.get(), namespace, WriteConcern.ACKNOWLEDGED)) then: !collectionNameExists('nonExistingCollection') @@ -84,7 +86,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection')) assert collectionNameExists(getCollectionName()) - def operation = new DropCollectionOperation(getNamespace(), new WriteConcern(5)) + def operation = new DropCollectionOperation(CSOT_TIMEOUT.get(), getNamespace(), new WriteConcern(5)) when: async ? 
executeAsync(operation) : operation.execute(getBinding()) @@ -99,7 +101,7 @@ class DropCollectionOperationSpecification extends OperationFunctionalSpecificat } def collectionNameExists(String collectionName) { - def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding()) + def cursor = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).execute(getBinding()) if (!cursor.hasNext()) { return false } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy index 1069dbfe2a6..746aa9c0305 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy @@ -25,6 +25,8 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding @@ -42,47 +44,28 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio assert databaseNameExists(databaseName) when: - new DropDatabaseOperation(databaseName).execute(getBinding()) + execute(new DropDatabaseOperation(CSOT_TIMEOUT.get(), databaseName, WriteConcern.ACKNOWLEDGED), async) then: !databaseNameExists(databaseName) - } - - - @IgnoreIf({ isSharded() }) - def 'should drop a database that exists asynchronously'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection')) - assert databaseNameExists(databaseName) - when: - executeAsync(new 
DropDatabaseOperation(databaseName)) - - then: - !databaseNameExists(databaseName) + where: + async << [true, false] } + def 'should not error when dropping a collection that does not exist'() { given: def dbName = 'nonExistingDatabase' when: - new DropDatabaseOperation(dbName).execute(getBinding()) + execute(new DropDatabaseOperation(CSOT_TIMEOUT.get(), dbName, WriteConcern.ACKNOWLEDGED), async) then: !databaseNameExists(dbName) - } - - - def 'should not error when dropping a collection that does not exist asynchronously'() { - given: - def dbName = 'nonExistingDatabase' - when: - executeAsync(new DropDatabaseOperation(dbName)) - - then: - !databaseNameExists(dbName) + where: + async << [true, false] } @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() }) @@ -92,7 +75,7 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio // On servers older than 4.0 that don't support this failpoint, use a crazy w value instead def w = serverVersionAtLeast(4, 0) ? 
2 : 5 - def operation = new DropDatabaseOperation(databaseName, new WriteConcern(w)) + def operation = new DropDatabaseOperation(CSOT_TIMEOUT.get(), databaseName, new WriteConcern(w)) if (serverVersionAtLeast(4, 0)) { configureFailPoint(BsonDocument.parse('{ configureFailPoint: "failCommand", ' + 'mode : {times : 1}, ' + @@ -113,7 +96,7 @@ class DropDatabaseOperationSpecification extends OperationFunctionalSpecificatio } def databaseNameExists(String databaseName) { - new ListDatabasesOperation(new DocumentCodec()).execute(getBinding()).next()*.name.contains(databaseName) + new ListDatabasesOperation(CSOT_NO_TIMEOUT.get(), new DocumentCodec()).execute(getBinding()).next()*.name.contains(databaseName) } } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy index 029b2c8544b..eca81e6ca6a 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy @@ -30,19 +30,20 @@ import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf import spock.lang.Unroll +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionLessThan -import static java.util.concurrent.TimeUnit.SECONDS class DropIndexOperationSpecification extends OperationFunctionalSpecification { def 'should not error when dropping non-existent index on non-existent collection'() { when: - 
execute(new DropIndexOperation(getNamespace(), 'made_up_index_1'), async) + execute(new DropIndexOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'made_up_index_1', null), async) then: getIndexes().size() == 0 @@ -56,7 +57,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) when: - execute(new DropIndexOperation(getNamespace(), 'made_up_index_1'), async) + execute(new DropIndexOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'made_up_index_1', null), async) then: thrown(MongoException) @@ -70,7 +71,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) when: - execute(new DropIndexOperation(getNamespace(), 'theField_1'), async) + execute(new DropIndexOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'theField_1', null), async) List indexes = getIndexes() then: @@ -87,7 +88,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(keys) when: - execute(new DropIndexOperation(getNamespace(), keys), async) + execute(new DropIndexOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), keys, null), async) List indexes = getIndexes() then: @@ -110,7 +111,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { given: def keys = new BsonDocument('theField', new BsonInt32(1)) collectionHelper.createIndex(keys) - def operation = new DropIndexOperation(getNamespace(), keys).maxTime(30, SECONDS) + def operation = new DropIndexOperation(CSOT_MAX_TIME.get(), getNamespace(), keys, null) enableMaxTimeFailPoint() @@ -133,7 +134,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(keys) when: - execute(new DropIndexOperation(getNamespace(), new BsonDocument('theField', new 
BsonInt64(1))), async) + execute(new DropIndexOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new BsonDocument('theField', new BsonInt64(1)), null), async) List indexes = getIndexes() then: @@ -150,7 +151,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { collectionHelper.createIndex(new BsonDocument('theOtherField', new BsonInt32(1))) when: - execute(new DropIndexOperation(getNamespace(), '*'), async) + execute(new DropIndexOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), '*', null), async) List indexes = getIndexes() then: @@ -165,7 +166,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { def 'should throw on write concern error'() { given: collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) - def operation = new DropIndexOperation(getNamespace(), 'theField_1', new WriteConcern(5)) + def operation = new DropIndexOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), 'theField_1', new WriteConcern(5)) when: execute(operation, async) @@ -181,7 +182,7 @@ class DropIndexOperationSpecification extends OperationFunctionalSpecification { def getIndexes() { def indexes = [] - def cursor = new ListIndexesOperation(getNamespace(), new DocumentCodec()).execute(getBinding()) + def cursor = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()).execute(getBinding()) while (cursor.hasNext()) { indexes.addAll(cursor.next()) } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy index aad74b1881f..5d03507aea1 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy @@ -34,8 +34,8 @@ import 
org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import java.util.concurrent.TimeUnit - +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -54,7 +54,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def 'should have the correct defaults'() { when: - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec) then: operation.getNamespace() == getNamespace() @@ -63,7 +63,6 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.MILLISECONDS) == 0 operation.getCollation() == null } @@ -74,18 +73,16 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def projection = BsonDocument.parse('{ projection : 1}') when: - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec) .filter(filter) .sort(sort) .projection(projection) - .maxTime(10, TimeUnit.MILLISECONDS) .collation(defaultCollation) then: operation.getFilter() == filter operation.getSort() == sort operation.getProjection() == projection - operation.getMaxTime(TimeUnit.MILLISECONDS) == 10 operation.getCollation() == defaultCollation } @@ -97,7 +94,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati getCollectionHelper().insertDocuments(new 
DocumentCodec(), pete, sam) when: - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -118,8 +115,8 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati getWorkerCollectionHelper().insertDocuments(new WorkerCodec(), pete, sam) when: - FindAndDeleteOperation operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, - workerCodec).filter(new BsonDocument('name', new BsonString('Pete'))) + FindAndDeleteOperation operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, + false, workerCodec).filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) then: @@ -138,7 +135,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) Document pete = new Document('name', 'Pete').append('job', 'handyman') helper.insertDocuments(new DocumentCodec(), pete) - def operation = new FindAndDeleteOperation(getNamespace(), new WriteConcern(5, 1), false, + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new WriteConcern(5, 1), false, documentCodec).filter(new BsonDocument('name', new BsonString('Pete'))) when: @@ -170,7 +167,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}}}''') configureFailPoint(failPoint) - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec).filter(new 
BsonDocument('name', new BsonString('Pete'))) when: @@ -196,10 +193,11 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def includeTxnNumber = retryWrites && writeConcern.isAcknowledged() && serverType != STANDALONE def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) - def operation = new FindAndDeleteOperation(getNamespace(), writeConcern as WriteConcern, + def operation = new FindAndDeleteOperation(CSOT_MAX_TIME.get(), getNamespace(), writeConcern as WriteConcern, retryWrites as boolean, documentCodec) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('remove', BsonBoolean.TRUE) + .append('maxTimeMS', new BsonInt64(100)) if (includeWriteConcern) { expectedCommand.put('writeConcern', writeConcern.asDocument()) @@ -220,12 +218,10 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati operation.filter(filter) .sort(sort) .projection(projection) - .maxTime(10, TimeUnit.MILLISECONDS) expectedCommand.append('query', filter) .append('sort', sort) .append('fields', projection) - .append('maxTimeMS', new BsonInt64(10)) operation.collation(defaultCollation) expectedCommand.append('collation', defaultCollation.asDocument()) @@ -252,7 +248,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, documentCodec) .filter(new BsonDocument('name', new BsonString('Pete'))) enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) @@ -273,7 +269,7 @@ class 
FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def 'should retry if the connection initially fails'() { when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, documentCodec) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('remove', BsonBoolean.TRUE) .append('txnNumber', new BsonInt64(0)) @@ -287,7 +283,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati def 'should throw original error when retrying and failing'() { given: - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, documentCodec) def originalException = new MongoSocketException('Some failure', new ServerAddress()) when: @@ -315,7 +311,7 @@ class FindAndDeleteOperationSpecification extends OperationFunctionalSpecificati given: def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + def operation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy index a4a0a48bd60..2f4b659de62 100644 --- 
a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy @@ -40,8 +40,8 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import java.util.concurrent.TimeUnit - +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -62,7 +62,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should have the correct defaults and passed values'() { when: def replacement = new BsonDocument('replace', new BsonInt32(1)) - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec, + replacement) then: operation.getNamespace() == getNamespace() @@ -72,7 +73,6 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.SECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null } @@ -84,9 +84,9 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def projection = new BsonDocument('projection', new BsonInt32(1)) when: - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec, new BsonDocument('replace', new 
BsonInt32(1))).filter(filter).sort(sort).projection(projection) - .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true).returnOriginal(false) + .bypassDocumentValidation(true).upsert(true).returnOriginal(false) .collation(defaultCollation) then: @@ -94,7 +94,6 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat operation.getSort() == sort operation.getProjection() == projection operation.upsert == true - operation.getMaxTime(TimeUnit.SECONDS) == 1 operation.getBypassDocumentValidation() !operation.isReturnOriginal() operation.getCollation() == defaultCollation @@ -110,7 +109,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -120,7 +120,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.find().get(0).getString('name') == 'Jordan' when: - operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, + operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec, new BsonDocumentWrapper(pete, documentCodec)) .filter(new BsonDocument('name', new BsonString('Jordan'))) .returnOriginal(false) @@ -144,8 +144,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new WorkerCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, - replacement).filter(new BsonDocument('name', new BsonString('Pete'))) + def operation = new 
FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + workerCodec, replacement).filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) then: @@ -154,7 +154,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: replacement = new BsonDocumentWrapper(pete, workerCodec) - operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, replacement) + operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, workerCodec, + replacement) .filter(new BsonDocument('name', new BsonString('Jordan'))) .returnOriginal(false) returnedDocument = execute(operation, async) @@ -169,7 +170,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should return null if query fails to match'() { when: BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -183,7 +185,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should throw an exception if replacement contains update operators'() { given: def replacement = new BsonDocumentWrapper(['$inc': 1] as Document, documentCodec) - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, replacement) when: execute(operation, async) @@ -207,7 +210,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: def 
replacement = new BsonDocument('level', new BsonInt32(9)) - def operation = new FindAndReplaceOperation(namespace, ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), namespace, ACKNOWLEDGED, false, + documentCodec, replacement) execute(operation, async) then: @@ -245,8 +249,9 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') when: - def operation = new FindAndReplaceOperation(getNamespace(), new WriteConcern(5, 1), - false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Pete'))) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + new WriteConcern(5, 1), false, documentCodec, jordan) + .filter(new BsonDocument('name', new BsonString('Pete'))) execute(operation, async) then: @@ -258,7 +263,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat ex.writeResult.upsertedId == null when: - operation = new FindAndReplaceOperation(getNamespace(), new WriteConcern(5, 1), + operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new WriteConcern(5, 1), false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Bob'))) .upsert(true) execute(operation, async) @@ -290,7 +295,7 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat configureFailPoint(failPoint) BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Pete'))) when: @@ -317,9 +322,11 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def includeWriteConcern = 
writeConcern.isAcknowledged() && !writeConcern.isServerDefault() def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def replacement = BsonDocument.parse('{ replacement: 1}') - def operation = new FindAndReplaceOperation(getNamespace(), writeConcern, retryWrites, documentCodec, replacement) + def operation = new FindAndReplaceOperation(CSOT_MAX_TIME.get(), getNamespace(), writeConcern, retryWrites, + documentCodec, replacement) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', replacement) + .append('maxTimeMS', new BsonInt64(100)) if (includeWriteConcern) { expectedCommand.put('writeConcern', writeConcern.asDocument()) } @@ -341,12 +348,10 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat .sort(sort) .projection(projection) .bypassDocumentValidation(true) - .maxTime(10, TimeUnit.MILLISECONDS) expectedCommand.append('query', filter) .append('sort', sort) .append('fields', projection) - .append('maxTimeMS', new BsonInt64(10)) operation.collation(defaultCollation) expectedCommand.append('collation', defaultCollation.asDocument()) @@ -376,7 +381,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat helper.insertDocuments(new DocumentCodec(), pete, sam) when: - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, jordan) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, + documentCodec, jordan) .filter(new BsonDocument('name', new BsonString('Pete'))) enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) @@ -398,7 +404,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def replacement 
= BsonDocument.parse('{ replacement: 1}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, + documentCodec, replacement) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', replacement) .append('txnNumber', new BsonInt64(0)) @@ -414,7 +421,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def 'should throw original error when retrying and failing'() { given: def replacement = BsonDocument.parse('{ replacement: 1}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, replacement) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, + documentCodec, replacement) def originalException = new MongoSocketException('Some failure', new ServerAddress()) when: @@ -443,7 +451,8 @@ class FindAndReplaceOperationSpecification extends OperationFunctionalSpecificat def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def replacement = BsonDocument.parse('{str: "bar"}') - def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, replacement) + def operation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, replacement) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy index d6625cd4d88..8b2d2f33f1e 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy +++ 
b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy @@ -41,8 +41,8 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf -import java.util.concurrent.TimeUnit - +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -64,7 +64,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should have the correct defaults and passed values'() { when: def update = new BsonDocument('update', new BsonInt32(1)) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) then: operation.getNamespace() == getNamespace() @@ -74,7 +75,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.SECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null } @@ -83,7 +83,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should have the correct defaults and passed values using update pipelines'() { when: def updatePipeline = new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1)))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, updatePipeline) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, updatePipeline) then: operation.getNamespace() == 
getNamespace() @@ -93,7 +94,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getFilter() == null operation.getSort() == null operation.getProjection() == null - operation.getMaxTime(TimeUnit.SECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null } @@ -105,9 +105,12 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def projection = new BsonDocument('projection', new BsonInt32(1)) when: - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, - new BsonDocument('update', new BsonInt32(1))).filter(filter).sort(sort).projection(projection) - .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + ACKNOWLEDGED, false, documentCodec, new BsonDocument('update', new BsonInt32(1))) + .filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true).upsert(true) .returnOriginal(false) .collation(defaultCollation) @@ -116,7 +119,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getSort() == sort operation.getProjection() == projection operation.upsert == true - operation.getMaxTime(TimeUnit.SECONDS) == 1 operation.getBypassDocumentValidation() !operation.isReturnOriginal() operation.getCollation() == defaultCollation @@ -130,10 +132,12 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def projection = new BsonDocument('projection', new BsonInt32(1)) when: - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, - new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1))))) - .filter(filter).sort(sort).projection(projection) - .bypassDocumentValidation(true).maxTime(1, TimeUnit.SECONDS).upsert(true) + def operation = new 
FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1))))) + .filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true).upsert(true) .returnOriginal(false) .collation(defaultCollation) @@ -142,7 +146,6 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati operation.getSort() == sort operation.getProjection() == projection operation.upsert == true - operation.getMaxTime(TimeUnit.SECONDS) == 1 operation.getBypassDocumentValidation() !operation.isReturnOriginal() operation.getCollation() == defaultCollation @@ -158,7 +161,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -169,7 +173,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) returnedDocument = execute(operation, async) @@ -192,7 +197,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonArray(singletonList(new BsonDocument('$addFields', new BsonDocument('foo', new 
BsonInt32(1))))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) Document returnedDocument = execute(operation, false) @@ -203,7 +209,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonArray(singletonList(new BsonDocument('$addFields', new BsonDocument('foo', new BsonInt32(1))))) - operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) returnedDocument = execute(operation, false) @@ -223,7 +230,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + workerCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Worker returnedDocument = execute(operation, async) @@ -234,7 +242,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, update) + operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + workerCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) 
returnedDocument = execute(operation, async) @@ -257,7 +266,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonArray(singletonList(new BsonDocument('$project', new BsonDocument('name', new BsonInt32(1))))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) .returnOriginal(false) Document returnedDocument = execute(operation, async) @@ -273,7 +283,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should return null if query fails to match'() { when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false + , documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) Document returnedDocument = execute(operation, async) @@ -287,7 +298,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw an exception if update contains fields that are not update operators'() { given: def update = new BsonDocument('x', new BsonInt32(1)) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) when: execute(operation, async) @@ -304,7 +316,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw an exception if update pipeline contains operations that are not supported'() { when: def update = new 
BsonArray(singletonList(new BsonDocument('$foo', new BsonDocument('x', new BsonInt32(1))))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) execute(operation, async) then: @@ -312,7 +325,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: update = singletonList(new BsonInt32(1)) - operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) execute(operation, async) then: @@ -333,7 +347,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('level', new BsonInt32(-1))) - def operation = new FindAndUpdateOperation(namespace, ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), namespace, ACKNOWLEDGED, false, + documentCodec, update) execute(operation, async) then: @@ -368,7 +383,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) when: - def operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + new WriteConcern(5, 1), false, documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) execute(operation, async) @@ -381,7 +397,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati ex.writeResult.upsertedId == null when: - operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, documentCodec, 
update) + operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new WriteConcern(5, 1), false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Bob'))) .upsert(true) execute(operation, async) @@ -410,7 +427,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati configureFailPoint(failPoint) def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) when: @@ -437,9 +455,11 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(getNamespace(), writeConcern, retryWrites, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_MAX_TIME.get(), getNamespace(), writeConcern, retryWrites, + documentCodec, update) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', update) + .append('maxTimeMS', new BsonInt64(100)) if (includeWriteConcern) { expectedCommand.put('writeConcern', writeConcern.asDocument()) } @@ -461,12 +481,10 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati .sort(sort) .projection(projection) .bypassDocumentValidation(true) - .maxTime(10, TimeUnit.MILLISECONDS) expectedCommand.append('query', filter) .append('sort', sort) .append('fields', projection) - .append('maxTimeMS', new BsonInt64(10)) 
operation.collation(defaultCollation) expectedCommand.append('collation', defaultCollation.asDocument()) @@ -496,7 +514,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) .filter(new BsonDocument('name', new BsonString('Pete'))) enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) @@ -519,7 +538,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati when: def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) .append('update', update) .append('txnNumber', new BsonInt64(0)) @@ -535,7 +555,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati def 'should throw original error when retrying and failing'() { given: def update = BsonDocument.parse('{ update: 1}') - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) def originalException = new MongoSocketException('Some failure', new ServerAddress()) when: @@ -564,7 +585,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati 
def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) def update = BsonDocument.parse('{ $set: {str: "bar"}}') - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) @@ -586,7 +608,8 @@ class FindAndUpdateOperationSpecification extends OperationFunctionalSpecificati getCollectionHelper().insertDocuments(documentOne, documentTwo) def update = BsonDocument.parse('{ $set: {"y.$[i].b": 2}}') def arrayFilters = [BsonDocument.parse('{"i.b": 3}')] - def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + def operation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) .returnOriginal(false) .arrayFilters(arrayFilters) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy index 31de9603527..61a513e289a 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy @@ -26,7 +26,6 @@ import com.mongodb.ReadPreference import com.mongodb.ReadPreferenceHedgeOptions import com.mongodb.ServerAddress import com.mongodb.async.FutureResultCallback -import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.connection.ClusterId import com.mongodb.connection.ConnectionDescription import com.mongodb.connection.ConnectionId @@ -53,6 +52,8 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static 
com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync @@ -63,14 +64,10 @@ import static com.mongodb.ClusterFixture.getCluster import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.ClusterFixture.serverVersionLessThan -import static com.mongodb.CursorType.NonTailable import static com.mongodb.CursorType.Tailable -import static com.mongodb.CursorType.TailableAwait import static com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS import static org.junit.Assert.assertEquals class FindOperationSpecification extends OperationFunctionalSpecification { @@ -80,14 +77,12 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def decoder = new DocumentCodec() when: - FindOperation operation = new FindOperation(getNamespace(), decoder) + FindOperation operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), decoder) then: operation.getNamespace() == getNamespace() operation.getDecoder() == decoder operation.getFilter() == null - operation.getMaxTime(MILLISECONDS) == 0 - operation.getMaxAwaitTime(MILLISECONDS) == 0 operation.getHint() == null operation.getLimit() == 0 operation.getSkip() == 0 @@ -107,9 +102,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def hint = new BsonString('a_1') when: - FindOperation operation = new FindOperation(getNamespace(), new DocumentCodec()) - .maxTime(10, SECONDS) - .maxAwaitTime(20, 
SECONDS) + FindOperation operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .filter(filter) .limit(20) .skip(30) @@ -125,8 +118,6 @@ class FindOperationSpecification extends OperationFunctionalSpecification { then: operation.getFilter() == filter - operation.getMaxTime(MILLISECONDS) == 10000 - operation.getMaxAwaitTime(MILLISECONDS) == 20000 operation.getLimit() == 20 operation.getSkip() == 30 operation.getHint() == hint @@ -143,7 +134,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { given: def document = new Document('_id', 1) getCollectionHelper().insertDocuments(new DocumentCodec(), document) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) when: def results = executeAndCollectBatchCursorResults(operation, async) @@ -169,7 +160,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { where: [async, operation] << [ [true, false], - [new FindOperation(getNamespace(), new DocumentCodec()).filter(new BsonDocument('_id', new BsonInt32(1)))] + [new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) + .filter(new BsonDocument('_id', new BsonInt32(1)))] ].combinations() } @@ -189,7 +181,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { where: [async, operation] << [ [true, false], - [new FindOperation(getNamespace(), new DocumentCodec()).sort(new BsonDocument('_id', new BsonInt32(1)))] + [new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) + .sort(new BsonDocument('_id', new BsonInt32(1)))] ].combinations() } @@ -197,7 +190,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 5).append('y', 10), new Document('_id', 1).append('x', 10)) - def operation = new 
FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .projection(new BsonDocument('_id', new BsonInt32(0)).append('x', new BsonInt32(1))) when: @@ -216,7 +209,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { new Document('_id', 5)] getCollectionHelper().insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .sort(new BsonDocument('_id', new BsonInt32(1))) .skip(3) @@ -236,7 +229,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { new Document('_id', 5)] getCollectionHelper().insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .sort(new BsonDocument('_id', new BsonInt32(1))) .limit(limit) @@ -255,7 +248,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def documents = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3), new Document('_id', 4), new Document('_id', 5)] getCollectionHelper().insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .sort(new BsonDocument('_id', new BsonInt32(1))) .batchSize(batchSize) @@ -298,7 +291,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should throw query exception'() { given: - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .filter(new BsonDocument('x', new 
BsonDocument('$thisIsNotAnOperator', BsonBoolean.TRUE))) when: @@ -330,7 +323,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { where: [async, operation] << [ [true, false], - [new FindOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS)] + [new FindOperation(CSOT_MAX_TIME.get(), getNamespace(), new DocumentCodec())] ].combinations() } @@ -340,7 +333,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { collectionHelper.insertDocuments(new DocumentCodec(), new Document('x', 'y').append('count', it)) } collectionHelper.createIndex(new BsonDocument('count', new BsonInt32(1))) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .max(new BsonDocument('count', new BsonInt32(11))) .hint(new BsonDocument('count', new BsonInt32(1))) @@ -360,7 +353,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { collectionHelper.insertDocuments(new DocumentCodec(), new Document('x', 'y').append('count', it)) } collectionHelper.createIndex(new BsonDocument('count', new BsonInt32(1))) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .min(new BsonDocument('count', new BsonInt32(10))) .hint(new BsonDocument('count', new BsonInt32(1))) @@ -381,7 +374,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { } collectionHelper.createIndex(new BsonDocument('x', new BsonInt32(1))) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .filter(new BsonDocument('x', new BsonInt32(7))) .returnKey(true) @@ -401,7 +394,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def index = new 
BsonDocument('a', new BsonInt32(1)) collectionHelper.createIndex(index) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .hint((BsonValue) hint) .asExplainableOperation(null, new BsonDocumentCodec()) @@ -420,10 +413,10 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should apply comment'() { given: def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), new BsonDocumentCodec()) - .execute(getBinding()) + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) def expectedComment = 'this is a comment' - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .comment(new BsonString(expectedComment)) when: @@ -440,7 +433,8 @@ class FindOperationSpecification extends OperationFunctionalSpecification { } cleanup: - new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), new BsonDocumentCodec()) + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new BsonDocumentCodec()) .execute(getBinding()) profileCollectionHelper.drop() @@ -453,7 +447,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { String fieldName = serverVersionAtLeast(3, 2) ? 
'$recordId' : '$diskLoc' collectionHelper.insertDocuments(new BsonDocument()) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) .showRecordId(true) when: @@ -470,7 +464,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def 'should read from a secondary'() { given: collectionHelper.insertDocuments(new DocumentCodec(), new Document()) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) def syncBinding = new ClusterBinding(getCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, null, IgnorableRequestContext.INSTANCE) def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, null, @@ -492,7 +486,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def documents = [new Document('_id', 3), new Document('_id', 1), new Document('_id', 2), new Document('_id', 5), new Document('_id', 4)] collectionHelper.insertDocuments(new DocumentCodec(), documents) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) when: def hedgeOptions = isHedgeEnabled != null ? 
@@ -534,7 +528,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) when: operation.execute(binding) @@ -574,7 +568,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) when: executeAsync(operation, binding) @@ -615,7 +609,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(getNamespace(), new DocumentCodec()).allowDiskUse(true) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()).allowDiskUse(true) when: operation.execute(binding) @@ -655,7 +649,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE) appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new FindOperation(getNamespace(), new DocumentCodec()).allowDiskUse(true) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()).allowDiskUse(true) when: 
executeAsync(operation, binding) @@ -682,18 +676,21 @@ class FindOperationSpecification extends OperationFunctionalSpecification { } // sanity check that the server accepts tailable and await data flags + // TODO JAVA-4058 + /* def 'should pass tailable and await data flags through'() { given: def (cursorType, maxAwaitTimeMS, maxTimeMSForCursor) = cursorDetails collectionHelper.create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)) - def operation = new FindOperation(namespace, new BsonDocumentCodec()) + def operation = new FindOperation(CSOT_MAX_TIME.get(), namespace, new BsonDocumentCodec()) .cursorType(cursorType) - .maxAwaitTime(maxAwaitTimeMS, MILLISECONDS) when: def cursor = execute(operation, async) then: + println cursor + // TODO JAVA-4058 cursor.maxTimeMS == maxTimeMSForCursor where: @@ -702,11 +699,12 @@ class FindOperationSpecification extends OperationFunctionalSpecification { [[NonTailable, 100, 0], [Tailable, 100, 0], [TailableAwait, 100, 100]] ].combinations() } + */ // sanity check that the server accepts the miscallaneous flags def 'should pass miscallaneous flags through'() { given: - def operation = new FindOperation(namespace, new BsonDocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, new BsonDocumentCodec()) .noCursorTimeout(true) .partial(true) .oplogReplay(true) @@ -726,7 +724,7 @@ class FindOperationSpecification extends OperationFunctionalSpecification { given: def document = BsonDocument.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new FindOperation(getNamespace(), new BsonDocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new BsonDocumentCodec()) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy 
b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy index 9e2d8937818..408f0c5640f 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy @@ -16,13 +16,13 @@ package com.mongodb.internal.operation - import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadPreference import com.mongodb.ServerAddress import com.mongodb.ServerCursor +import com.mongodb.WriteConcern import com.mongodb.async.FutureResultCallback import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.connection.ConnectionDescription @@ -46,6 +46,9 @@ import org.bson.codecs.Decoder import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync @@ -53,7 +56,6 @@ import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isSharded import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.ClusterFixture.serverVersionLessThan -import static java.util.concurrent.TimeUnit.MILLISECONDS class ListCollectionsOperationSpecification extends OperationFunctionalSpecification { @@ -61,7 +63,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return empty set if database does not exist'() { given: - def operation = new ListCollectionsOperation(madeUpDatabase, new DocumentCodec()) + def operation = new 
ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), madeUpDatabase, new DocumentCodec()) when: def cursor = operation.execute(getBinding()) @@ -76,7 +78,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return empty cursor if database does not exist asynchronously'() { given: - def operation = new ListCollectionsOperation(madeUpDatabase, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), madeUpDatabase, new DocumentCodec()) when: def cursor = executeAsync(operation) @@ -92,7 +94,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return collection names if a collection exists'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) def helper = getCollectionHelper() def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) def codec = new DocumentCodec() @@ -113,7 +115,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionAtLeast(3, 0) }) def 'should throw if filtering on name with something other than a string'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) .filter(new BsonDocument('name', new BsonRegularExpression('^[^$]*$'))) when: @@ -125,7 +127,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter collection names if a name filter is specified'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) .filter(new BsonDocument('name', new BsonString('collection2'))) def helper = 
getCollectionHelper() def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) @@ -145,7 +147,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter capped collections'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) .filter(new BsonDocument('options.capped', BsonBoolean.TRUE)) def helper = getCollectionHelper() getCollectionHelper().create('collection3', new CreateCollectionOptions().capped(true).sizeInBytes(1000)) @@ -165,7 +167,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(3, 4) || serverVersionAtLeast(4, 0) }) def 'should get all fields when nameOnly is not requested'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) getCollectionHelper().create('collection4', new CreateCollectionOptions()) when: @@ -179,7 +181,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(4, 0) }) def 'should only get collection names when nameOnly is requested'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) .nameOnly(true) getCollectionHelper().create('collection5', new CreateCollectionOptions()) @@ -194,7 +196,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica @IgnoreIf({ serverVersionLessThan(3, 4) || serverVersionAtLeast(4, 0) }) def 'should only get all field names when nameOnly is requested on server versions that do not support nameOnly'() { given: - def operation = new 
ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) .nameOnly(true) getCollectionHelper().create('collection6', new CreateCollectionOptions()) @@ -209,7 +211,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should return collection names if a collection exists asynchronously'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()) def helper = getCollectionHelper() def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) def codec = new DocumentCodec() @@ -230,9 +232,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes when calling hasNext before next'() { given: - new DropDatabaseOperation(databaseName).execute(getBinding()) + new DropDatabaseOperation(CSOT_TIMEOUT.get(), databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).batchSize(2) when: def cursor = operation.execute(getBinding()) @@ -246,9 +248,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes without calling hasNext before next'() { given: - new DropDatabaseOperation(databaseName).execute(getBinding()) + new DropDatabaseOperation(CSOT_TIMEOUT.get(), databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).batchSize(2) 
when: def cursor = operation.execute(getBinding()) @@ -268,9 +270,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes when calling hasNext before tryNext'() { given: - new DropDatabaseOperation(databaseName).execute(getBinding()) + new DropDatabaseOperation(CSOT_TIMEOUT.get(), databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).batchSize(2) when: def cursor = operation.execute(getBinding()) @@ -290,9 +292,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes without calling hasNext before tryNext'() { given: - new DropDatabaseOperation(databaseName).execute(getBinding()) + new DropDatabaseOperation(CSOT_TIMEOUT.get(), databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).batchSize(2) when: def cursor = operation.execute(getBinding()) @@ -307,9 +309,9 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should filter indexes asynchronously'() { given: - new DropDatabaseOperation(databaseName).execute(getBinding()) + new DropDatabaseOperation(CSOT_TIMEOUT.get(), databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) addSeveralIndexes() - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).batchSize(2) when: def cursor = executeAsync(operation) @@ -322,7 +324,7 @@ class ListCollectionsOperationSpecification 
extends OperationFunctionalSpecifica def 'should use the set batchSize of collections'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).batchSize(2) def codec = new DocumentCodec() getCollectionHelper().insertDocuments(codec, ['a': 1] as Document) getCollectionHelper(new MongoNamespace(databaseName, 'collection2')).insertDocuments(codec, ['a': 1] as Document) @@ -354,7 +356,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica def 'should use the set batchSize of collections asynchronously'() { given: - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).batchSize(2) def codec = new DocumentCodec() getCollectionHelper().insertDocuments(codec, ['a': 1] as Document) getCollectionHelper(new MongoNamespace(databaseName, 'collection2')).insertDocuments(codec, ['a': 1] as Document) @@ -384,40 +386,24 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica } @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { + def 'should throw execution timeout exception'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).maxTime(1000, MILLISECONDS) + def operation = new ListCollectionsOperation(CSOT_MAX_TIME.get(), databaseName, new DocumentCodec()) enableMaxTimeFailPoint() when: - operation.execute(getBinding()) + execute(operation, async) then: thrown(MongoExecutionTimeoutException) cleanup: disableMaxTimeFailPoint() - } - - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { - given: - 
getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).maxTime(1000, MILLISECONDS) - - enableMaxTimeFailPoint() - - when: - executeAsync(operation) - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() + where: + async << [true, false] } def 'should use the readPreference to set secondaryOk'() { @@ -433,7 +419,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica getReadPreference() >> readPreference getServerApi() >> null } - def operation = new ListCollectionsOperation(helper.dbName, helper.decoder) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), helper.dbName, helper.decoder) when: '3.6.0' operation.execute(readBinding) @@ -460,7 +446,7 @@ class ListCollectionsOperationSpecification extends OperationFunctionalSpecifica getServerApi() >> null getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } - def operation = new ListCollectionsOperation(helper.dbName, helper.decoder) + def operation = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), helper.dbName, helper.decoder) when: '3.6.0' operation.executeAsync(readBinding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy index 95afad40957..fc1aa48a964 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation - import com.mongodb.MongoExecutionTimeoutException import com.mongodb.OperationFunctionalSpecification import com.mongodb.ReadPreference @@ -35,12 +34,11 @@ import 
org.bson.codecs.Decoder import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint -import static com.mongodb.ClusterFixture.executeAsync -import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isSharded -import static java.util.concurrent.TimeUnit.MILLISECONDS class ListDatabasesOperationSpecification extends OperationFunctionalSpecification { def codec = new DocumentCodec() @@ -48,7 +46,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati def 'should return a list of database names'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) - def operation = new ListDatabasesOperation(codec) + def operation = new ListDatabasesOperation(CSOT_NO_TIMEOUT.get(), codec) when: def names = executeAndCollectBatchCursorResults(operation, async)*.get('name') @@ -79,37 +77,21 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati def 'should throw execution timeout exception from execute'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListDatabasesOperation(codec).maxTime(1000, MILLISECONDS) + def operation = new ListDatabasesOperation(CSOT_MAX_TIME.get(), codec) enableMaxTimeFailPoint() when: - operation.execute(getBinding()) + execute(operation, async) then: thrown(MongoExecutionTimeoutException) cleanup: disableMaxTimeFailPoint() - } - - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document()) - def operation = new ListDatabasesOperation(codec).maxTime(1000, MILLISECONDS) - - enableMaxTimeFailPoint() - when: 
- executeAsync(operation) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() + where: + async << [true, false] } def 'should use the readPreference to set secondaryOk'() { @@ -125,7 +107,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati getReadPreference() >> readPreference getServerApi() >> null } - def operation = new ListDatabasesOperation(helper.decoder) + def operation = new ListDatabasesOperation(CSOT_NO_TIMEOUT.get(), helper.decoder) when: operation.execute(readBinding) @@ -151,7 +133,7 @@ class ListDatabasesOperationSpecification extends OperationFunctionalSpecificati getServerApi() >> null getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } - def operation = new ListDatabasesOperation(helper.decoder) + def operation = new ListDatabasesOperation(CSOT_NO_TIMEOUT.get(), helper.decoder) when: operation.executeAsync(readBinding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy index 4ca91524e9f..324b02199a1 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.internal.operation - import com.mongodb.MongoExecutionTimeoutException import com.mongodb.MongoNamespace import com.mongodb.OperationFunctionalSpecification @@ -45,18 +44,19 @@ import org.bson.codecs.Decoder import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint import static 
com.mongodb.ClusterFixture.enableMaxTimeFailPoint import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isSharded -import static java.util.concurrent.TimeUnit.MILLISECONDS class ListIndexesOperationSpecification extends OperationFunctionalSpecification { def 'should return empty list for nonexistent collection'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) when: def cursor = operation.execute(getBinding()) @@ -68,7 +68,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return empty list for nonexistent collection asynchronously'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) when: AsyncBatchCursor cursor = executeAsync(operation) @@ -82,7 +82,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return default index on Collection that exists'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) when: @@ -98,7 +98,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return default index on Collection that exists asynchronously'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) getCollectionHelper().insertDocuments(new DocumentCodec(), new 
Document('documentThat', 'forces creation of the Collection')) when: @@ -114,11 +114,11 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return created indexes on Collection'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1))) - new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)]) - .execute(getBinding()) + new CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, + [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique (true)], null).execute(getBinding()) when: BatchCursor cursor = operation.execute(getBinding()) @@ -134,11 +134,11 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should return created indexes on Collection asynchronously'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()) collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1))) - new CreateIndexesOperation(namespace, [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)]) - .execute(getBinding()) + new CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, + [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding()) when: def cursor = executeAsync(operation) @@ -154,7 +154,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 
'should use the set batchSize of collections'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).batchSize(2) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()).batchSize(2) collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection2', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection3', new BsonInt32(1))) @@ -185,7 +185,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification def 'should use the set batchSize of collections asynchronously'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).batchSize(2) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new DocumentCodec()).batchSize(2) collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection2', new BsonInt32(1))) collectionHelper.createIndex(new BsonDocument('collection3', new BsonInt32(1))) @@ -214,43 +214,26 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification } @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from execute'() { + def 'should throw execution timeout exception'() { given: - def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS) + def operation = new ListIndexesOperation(CSOT_MAX_TIME.get(), getNamespace(), new DocumentCodec()) collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) enableMaxTimeFailPoint() when: - operation.execute(getBinding()) + execute(operation, async) then: thrown(MongoExecutionTimeoutException) cleanup: disableMaxTimeFailPoint() - } - - - @IgnoreIf({ isSharded() }) - def 'should throw execution timeout exception from executeAsync'() { - given: - def operation = new 
ListIndexesOperation(getNamespace(), new DocumentCodec()).maxTime(1000, MILLISECONDS) - collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1))) - - enableMaxTimeFailPoint() - when: - executeAsync(operation) - - then: - thrown(MongoExecutionTimeoutException) - - cleanup: - disableMaxTimeFailPoint() + where: + async << [true, false] } - def 'should use the readPreference to set secondaryOk'() { given: def connection = Mock(Connection) @@ -264,7 +247,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification getReadConnectionSource() >> connectionSource getReadPreference() >> readPreference } - def operation = new ListIndexesOperation(helper.namespace, helper.decoder) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, helper.decoder) when: '3.6.0' operation.execute(readBinding) @@ -290,7 +273,7 @@ class ListIndexesOperationSpecification extends OperationFunctionalSpecification getReadPreference() >> readPreference getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } } - def operation = new ListIndexesOperation(helper.namespace, helper.decoder) + def operation = new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, helper.decoder) when: '3.6.0' operation.executeAsync(readBinding, Stub(SingleResultCallback)) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy index 62161de7a37..a59cabb437c 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy @@ -37,20 +37,22 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static 
com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet import static com.mongodb.ClusterFixture.serverVersionAtLeast import static com.mongodb.ClusterFixture.serverVersionLessThan import static com.mongodb.client.model.Filters.gte -import static java.util.concurrent.TimeUnit.MILLISECONDS class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpecification { def mapReduceInputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceInput') def mapReduceOutputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceOutput') - def mapReduceOperation = new MapReduceToCollectionOperation(mapReduceInputNamespace, - new BsonJavaScript('function(){ emit( this.name , 1 ); }'), - new BsonJavaScript('function(key, values){ return values.length; }'), - mapReduceOutputNamespace.getCollectionName()) + def mapReduceOperation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), mapReduceInputNamespace, + new BsonJavaScript('function(){ emit( this.name , 1 ); }'), + new BsonJavaScript('function(key, values){ return values.length; }'), + mapReduceOutputNamespace.getCollectionName(), null) def expectedResults = [new BsonDocument('_id', new BsonString('Pete')).append('value', new BsonDouble(2.0)), new BsonDocument('_id', new BsonString('Sam')).append('value', new BsonDouble(1.0))] as Set def helper = new CollectionHelper(new BsonDocumentCodec(), mapReduceOutputNamespace) @@ -64,8 +66,8 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe } def cleanup() { - new DropCollectionOperation(mapReduceInputNamespace).execute(getBinding()) - new DropCollectionOperation(mapReduceOutputNamespace).execute(getBinding()) + new DropCollectionOperation(CSOT_TIMEOUT.get(), mapReduceInputNamespace, 
WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropCollectionOperation(CSOT_TIMEOUT.get(), mapReduceOutputNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) } def 'should have the correct defaults'() { @@ -75,7 +77,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def out = 'outCollection' when: - def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out) + def operation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), mapF, reduceF, out, null) then: operation.getMapFunction() == mapF @@ -89,7 +91,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe operation.getLimit() == 0 operation.getScope() == null operation.getSort() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getBypassDocumentValidation() == null operation.getCollation() == null !operation.isJsMode() @@ -112,7 +113,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def writeConcern = WriteConcern.MAJORITY when: - def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, writeConcern) + def operation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), mapF, reduceF, out, writeConcern) .action(action) .databaseName(dbName) .finalizeFunction(finalizeF) @@ -120,7 +121,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe .limit(10) .scope(scope) .sort(sort) - .maxTime(1, MILLISECONDS) .bypassDocumentValidation(true) .collation(defaultCollation) @@ -135,7 +135,6 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe operation.getLimit() == 10 operation.getScope() == scope operation.getSort() == sort - operation.getMaxTime(MILLISECONDS) == 1 operation.getBypassDocumentValidation() == true operation.getCollation() == defaultCollation } @@ -182,10 +181,10 @@ class MapReduceToCollectionOperationSpecification extends 
OperationFunctionalSpe getCollectionHelper().insertDocuments(new BsonDocument()) when: - def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, + def operation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), mapReduceInputNamespace, new BsonJavaScript('function(){ emit( "level" , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), - 'collectionOut') + 'collectionOut', null) execute(operation, async) then: @@ -216,7 +215,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def 'should throw on write concern error'() { given: getCollectionHelper().insertDocuments(new BsonDocument()) - def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, + def operation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), mapReduceInputNamespace, new BsonJavaScript('function(){ emit( "level" , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), 'collectionOut', new WriteConcern(5)) @@ -248,7 +247,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def dbName = 'dbName' when: - def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, WriteConcern.MAJORITY) + def operation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), mapF, reduceF, out, WriteConcern.MAJORITY) def expectedCommand = new BsonDocument('mapreduce', new BsonString(getCollectionName())) .append('map', mapF) .append('reduce', reduceF) @@ -263,14 +262,14 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe ReadPreference.primary(), false) when: - operation.action(action) + operation = new MapReduceToCollectionOperation(CSOT_MAX_TIME.get(), getNamespace(), mapF, reduceF, out, WriteConcern.MAJORITY) + .action(action) .databaseName(dbName) .finalizeFunction(finalizeF) .filter(filter) .limit(10) .scope(scope) .sort(sort) - .maxTime(10, MILLISECONDS) 
.bypassDocumentValidation(true) .verbose(true) @@ -281,7 +280,7 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe .append('scope', scope) .append('verbose', BsonBoolean.TRUE) .append('limit', new BsonInt32(10)) - .append('maxTimeMS', new BsonInt64(10)) + .append('maxTimeMS', new BsonInt64(100)) if (includeCollation) { operation.collation(defaultCollation) @@ -309,10 +308,10 @@ class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpe def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper(mapReduceInputNamespace).insertDocuments(document) - def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, + def operation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), mapReduceInputNamespace, new BsonJavaScript('function(){ emit( this._id, this.str ); }'), new BsonJavaScript('function(key, values){ return values; }'), - 'collectionOut') + 'collectionOut', null) .filter(BsonDocument.parse('{str: "FOO"}')) .collation(caseInsensitiveCollation) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy index b3884a1eb97..19ba06d51c5 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy @@ -46,16 +46,17 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.serverVersionLessThan import static 
com.mongodb.connection.ServerType.STANDALONE import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand import static com.mongodb.internal.operation.ServerVersionHelper.MIN_WIRE_VERSION -import static java.util.concurrent.TimeUnit.MILLISECONDS class MapReduceWithInlineResultsOperationSpecification extends OperationFunctionalSpecification { private final bsonDocumentCodec = new BsonDocumentCodec() - def mapReduceOperation = new MapReduceWithInlineResultsOperation( + def mapReduceOperation = new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), new BsonJavaScript('function(){ emit( this.name , 1 ); }'), new BsonJavaScript('function(key, values){ return values.length; }'), @@ -76,7 +77,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction when: def mapF = new BsonJavaScript('function(){ }') def reduceF = new BsonJavaScript('function(key, values){ }') - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF, bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, mapF, reduceF, + bsonDocumentCodec) then: operation.getMapFunction() == mapF @@ -85,7 +87,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction operation.getFinalizeFunction() == null operation.getScope() == null operation.getSort() == null - operation.getMaxTime(MILLISECONDS) == 0 operation.getLimit() == 0 operation.getCollation() == null !operation.isJsMode() @@ -100,7 +101,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def finalizeF = new BsonJavaScript('function(key, value){}') def mapF = new BsonJavaScript('function(){ }') def reduceF = new BsonJavaScript('function(key, values){ }') - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF, bsonDocumentCodec) + def operation = new 
MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, + mapF, reduceF, bsonDocumentCodec) .filter(filter) .finalizeFunction(finalizeF) .scope(scope) @@ -108,7 +110,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction .jsMode(true) .verbose(true) .limit(20) - .maxTime(10, MILLISECONDS) .collation(defaultCollation) then: @@ -118,7 +119,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction operation.getFinalizeFunction() == finalizeF operation.getScope() == scope operation.getSort() == sort - operation.getMaxTime(MILLISECONDS) == 10 operation.getLimit() == 20 operation.getCollation() == defaultCollation operation.isJsMode() @@ -141,8 +141,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should use the ReadBindings readPreference to set secondaryOk'() { when: - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) then: testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.commandResult) @@ -153,12 +153,13 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction def 'should create the expected command'() { when: - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(CSOT_MAX_TIME.get(), helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) def expectedCommand = new BsonDocument('mapreduce', new 
BsonString(helper.namespace.getCollectionName())) .append('map', operation.getMapFunction()) .append('reduce', operation.getReduceFunction()) .append('out', new BsonDocument('inline', new BsonInt32(1))) + .append('maxTimeMS', new BsonInt64(100)) then: testOperation(operation, serverVersion, expectedCommand, async, helper.commandResult) @@ -171,7 +172,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction .jsMode(true) .verbose(true) .limit(20) - .maxTime(10, MILLISECONDS) expectedCommand.append('query', operation.getFilter()) @@ -180,7 +180,6 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction .append('finalize', operation.getFinalizeFunction()) .append('jsMode', BsonBoolean.TRUE) .append('verbose', BsonBoolean.TRUE) - .append('maxTimeMS', new BsonInt64(10)) .append('limit', new BsonInt32(20)) if (includeCollation) { @@ -204,7 +203,7 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction given: def document = Document.parse('{_id: 1, str: "foo"}') getCollectionHelper().insertDocuments(document) - def operation = new MapReduceWithInlineResultsOperation( + def operation = new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), namespace, new BsonJavaScript('function(){ emit( this.str, 1 ); }'), new BsonJavaScript('function(key, values){ return Array.sum(values); }'), @@ -242,8 +241,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction }''') appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) when: operation.execute(binding) @@ -291,8 
+290,8 @@ class MapReduceWithInlineResultsOperationSpecification extends OperationFunction }''') appendReadConcernToCommand(sessionContext, MIN_WIRE_VERSION, commandDocument) - def operation = new MapReduceWithInlineResultsOperation(helper.namespace, new BsonJavaScript('function(){ }'), - new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) + def operation = new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), helper.namespace, + new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec) when: executeAsync(operation, binding) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy index 7e7938acfe2..4f43fec5d04 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy @@ -48,6 +48,7 @@ import org.bson.types.ObjectId import spock.lang.IgnoreIf import util.spock.annotations.Slow +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.configureFailPoint import static com.mongodb.ClusterFixture.disableFailPoint import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint @@ -72,7 +73,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw IllegalArgumentException for empty list of requests'() { when: - new MixedBulkWriteOperation(getNamespace(), [], true, ACKNOWLEDGED, false) + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], true, ACKNOWLEDGED, false) then: thrown(IllegalArgumentException) @@ -80,7 +81,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should have the expected passed 
values'() { when: - def operation = new MixedBulkWriteOperation(getNamespace(), requests, ordered, writeConcern, retryWrites) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), requests, ordered, writeConcern, retryWrites) .bypassDocumentValidation(bypassValidation) then: @@ -100,8 +101,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when no document with the same id exists, should insert the document'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], - ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -120,7 +121,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def document = new BsonDocument('_id', new BsonInt32(1)) getCollectionHelper().insertDocuments(document) - def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(document)], ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new InsertRequest(document)], ordered, + ACKNOWLEDGED, false) when: execute(operation, async) @@ -135,8 +137,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'RawBsonDocument should not generate an _id'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))], - ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))], ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -154,7 +156,7 @@ class 
MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents match the query, a remove of one should remove one of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new DeleteRequest(new BsonDocument('x', BsonBoolean.TRUE)).multi(false)], ordered, ACKNOWLEDGED, false) @@ -173,7 +175,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true), new Document('x', false)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new DeleteRequest(new BsonDocument('x', BsonBoolean.TRUE))], ordered, ACKNOWLEDGED, false) @@ -191,7 +193,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when multiple document match the query, update of one should update only one of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).multi(false)], @@ -211,7 +213,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents match the query, update multi should update all of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new 
MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).multi(true)], ordered, ACKNOWLEDGED, false) @@ -231,7 +233,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def id = new ObjectId() def query = new BsonDocument('_id', new BsonObjectId(id)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(query, new BsonDocument('$set', new BsonDocument('x', new BsonInt32(2))), UPDATE).upsert(true)], ordered, ACKNOWLEDGED, false) @@ -250,7 +252,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def id = new ObjectId() def query = new BsonDocument('_id', new BsonObjectId(id)) given: - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(query, new BsonDocument('$set', new BsonDocument('x', new BsonInt32(2))), UPDATE).upsert(true).multi(true)], ordered, ACKNOWLEDGED, false) @@ -270,7 +272,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents matches the query, update one with upsert should update only one of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).multi(false).upsert(true)], @@ -290,7 +292,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents match the query, update multi 
with upsert should update all of them'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))), UPDATE).upsert(true).multi(true)], @@ -310,7 +312,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when updating with an empty document, update should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument(), UPDATE)], true, ACKNOWLEDGED, false) @@ -327,7 +329,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when replacing with an empty document, update should not throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument(), REPLACE)], true, ACKNOWLEDGED, false) @@ -344,7 +346,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when updating with an invalid document, update should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('a', new BsonInt32(1)), UPDATE)], true, ACKNOWLEDGED, false) @@ -362,7 +364,7 
@@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when replacing an invalid document, replace should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), REPLACE)], true, ACKNOWLEDGED, false) @@ -381,7 +383,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(5, 0) }) def 'when inserting a document with a field starting with a dollar sign, insert should not throw'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new InsertRequest(new BsonDocument('$inc', new BsonDocument('x', new BsonInt32(1))))], true, ACKNOWLEDGED, false) @@ -398,12 +400,12 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when a document contains a key with an illegal character, replacing a document with it should throw IllegalArgumentException'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(getNamespace(), - [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), - new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), - REPLACE) - .upsert(true)], - true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), + REPLACE) + .upsert(true)], + true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -418,7 +420,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when no 
document matches the query, a replace with upsert should insert a document'() { given: def id = new ObjectId() - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('_id', new BsonObjectId(id)) .append('x', new BsonInt32(2)), @@ -439,7 +441,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when a custom _id is upserted it should be in the write result'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonInt32(0)), new BsonDocument('$set', new BsonDocument('a', new BsonInt32(0))), UPDATE) @@ -471,7 +473,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'unacknowledged upserts with custom _id should not error'() { given: def binding = async ? 
getAsyncSingleConnectionBinding() : getSingleConnectionBinding() - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonInt32(0)), new BsonDocument('$set', new BsonDocument('a', new BsonInt32(0))), UPDATE) @@ -503,7 +505,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE), new BsonDocument('y', new BsonInt32(1)).append('x', BsonBoolean.FALSE), REPLACE).upsert(true)], @@ -524,7 +526,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when a replacement document is 16MB, the document is still replaced'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)) .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), @@ -545,16 +547,16 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when two update documents together exceed 16MB, the documents are still updated'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1), new Document('_id', 2)) - def operation = new MixedBulkWriteOperation(getNamespace(), - [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), - new BsonDocument('_id', new BsonInt32(1)) - .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), - 
REPLACE), - new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), - new BsonDocument('_id', new BsonInt32(2)) - .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), - REPLACE)], - true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('_id', new BsonInt32(1)) + .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), + REPLACE), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), + new BsonDocument('_id', new BsonInt32(2)) + .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])), + REPLACE)], + true, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -571,7 +573,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents together are just below the max message size, the documents are still inserted'() { given: def bsonBinary = new BsonBinary(new byte[16 * 1000 * 1000 - (getCollectionName().length() + 33)]) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [ new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)), new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)), @@ -592,7 +594,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when documents together are just above the max message size, the documents are still inserted'() { given: def bsonBinary = new BsonBinary(new byte[16 * 1000 * 1000 - (getCollectionName().length() + 32)]) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [ new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)), new InsertRequest(new BsonDocument('_id', new 
BsonObjectId()).append('b', bsonBinary)), @@ -613,7 +615,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should handle multi-length runs of ordered insert, update, replace, and remove'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), getTestWrites(), ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -636,13 +638,13 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should handle multi-length runs of UNACKNOWLEDGED insert, update, replace, and remove'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, UNACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), getTestWrites(), ordered, UNACKNOWLEDGED, false) def binding = async ? 
getAsyncSingleConnectionBinding() : getSingleConnectionBinding() when: def result = execute(operation, binding) - execute(new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED, - false,), binding) + execute(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, + [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED, false,), binding) then: !result.wasAcknowledged() @@ -671,7 +673,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat (1..numberOfWrites).each { writes.add(new InsertRequest(new BsonDocument())) } - def operation = new MixedBulkWriteOperation(getNamespace(), writes, ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), writes, ordered, ACKNOWLEDGED, false) when: execute(operation, binding) @@ -694,7 +696,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat writeOperations.add(upsert) writeOperations.add(new DeleteRequest(new BsonDocument('key', new BsonInt32(it)))) } - def operation = new MixedBulkWriteOperation(getNamespace(), writeOperations, ordered, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), writeOperations, ordered, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -709,13 +711,13 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'error details should have correct index on ordered write failure'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), - new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), - new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), - UPDATE), - new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // this should fail with index 2 - ], true, ACKNOWLEDGED, 
false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), + UPDATE), + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // this should fail with index 2 + ], true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -732,13 +734,13 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'error details should have correct index on unordered write failure'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), - new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), - new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), - UPDATE), - new InsertRequest(new BsonDocument('_id', new BsonInt32(3))) // this should fail with index 2 - ], false, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), + UPDATE), + new InsertRequest(new BsonDocument('_id', new BsonInt32(3))) // this should fail with index 2 + ], false, ACKNOWLEDGED, false) when: execute(operation, async) @@ -761,7 +763,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat for (int i = 0; i < 2000; i++) { inserts.add(new InsertRequest(new BsonDocument('_id', new BsonInt32(i)))) } - def operation = new MixedBulkWriteOperation(getNamespace(), inserts, false, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), inserts, false, ACKNOWLEDGED, false) when: 
execute(operation, async) @@ -783,7 +785,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat for (int i = 0; i < 2000; i++) { inserts.add(new InsertRequest(new BsonDocument('_id', new BsonInt32(i)))) } - def operation = new MixedBulkWriteOperation(getNamespace(), inserts, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), inserts, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -803,9 +805,9 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ !isDiscoverableReplicaSet() }) def 'should throw bulk write exception with a write concern error when wtimeout is exceeded'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], - false, new WriteConcern(5, 1), false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], + false, new WriteConcern(5, 1), false) when: execute(operation, async) @@ -822,10 +824,10 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'when there is a duplicate key error and a write concern error, both should be reported'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), - [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))), - new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // duplicate key - ], false, new WriteConcern(4, 1), false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // duplicate key + ], false, new WriteConcern(4, 1), false) when: execute(operation, async) // This is assuming that it won't be able to replicate 
to 4 servers in 1 ms @@ -845,7 +847,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw on write concern error on multiple failpoint'() { given: getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new DeleteRequest(new BsonDocument('_id', new BsonInt32(2))), // existing key new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // existing (duplicate) key ], true, ACKNOWLEDGED, true) @@ -877,7 +879,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw IllegalArgumentException when passed an empty bulk operation'() { when: - new MixedBulkWriteOperation(getNamespace(), [], ordered, UNACKNOWLEDGED, false) + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [], ordered, UNACKNOWLEDGED, false) then: thrown(IllegalArgumentException) @@ -889,7 +891,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(3, 2) }) def 'should throw if bypassDocumentValidation is set and writeConcern is UNACKNOWLEDGED'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, UNACKNOWLEDGED, false) .bypassDocumentValidation(bypassDocumentValidation) @@ -906,7 +908,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(3, 4) }) def 'should throw if collation is set and write is UNACKNOWLEDGED'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new DeleteRequest(BsonDocument.parse('{ level: 9 }')).collation(defaultCollation)], 
true, UNACKNOWLEDGED, false) when: @@ -926,8 +928,8 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def collectionHelper = getCollectionHelper(namespace) collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions().validationOptions( new ValidationOptions().validator(gte('level', 10)))) - def operation = new MixedBulkWriteOperation(namespace, [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered, - ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, + [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered, ACKNOWLEDGED, false) when: execute(operation, async) @@ -962,7 +964,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new ValidationOptions().validator(gte('level', 10)))) collectionHelper.insertDocuments(BsonDocument.parse('{ x: true, level: 10}')) - def operation = new MixedBulkWriteOperation(namespace, + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new UpdateRequest(BsonDocument.parse('{x: true}'), BsonDocument.parse('{$inc: {level: -1}}'), UPDATE).multi(false)], ordered, ACKNOWLEDGED, false) @@ -990,7 +992,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 1), new Document('y', 1), new Document('z', 1)) - def operation = new MixedBulkWriteOperation(namespace, requests, false, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, requests, false, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1021,7 +1023,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def requests = [new DeleteRequest(BsonDocument.parse('{str: "FOO"}}')).collation(caseInsensitiveCollation), new UpdateRequest(BsonDocument.parse('{str: "BAR"}}'), BsonDocument.parse('{str: "bar"}}'), 
REPLACE) .collation(caseInsensitiveCollation)] - def operation = new MixedBulkWriteOperation(namespace, requests, false, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, requests, false, ACKNOWLEDGED, false) when: BulkWriteResult result = execute(operation, async) @@ -1039,7 +1041,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def testWrites = getTestWrites() Collections.shuffle(testWrites) getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, ACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), testWrites, true, ACKNOWLEDGED, true) when: if (serverVersionAtLeast(3, 6) && isDiscoverableReplicaSet()) { @@ -1082,7 +1084,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def testWrites = getTestWrites() getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, ACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), testWrites, true, ACKNOWLEDGED, true) when: enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse(failPoint)) @@ -1107,7 +1109,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat given: def testWrites = getTestWrites() getCollectionHelper().insertDocuments(getTestInserts()) - def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, UNACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), testWrites, true, UNACKNOWLEDGED, true) when: enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse(failPoint)) @@ -1131,7 +1133,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should retry if the connection initially fails'() 
{ when: def cannedResult = BsonDocument.parse('{ok: 1.0, n: 1}') - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, ACKNOWLEDGED, true) def expectedCommand = new BsonDocument('insert', new BsonString(getNamespace().getCollectionName())) .append('ordered', BsonBoolean.TRUE) @@ -1146,7 +1148,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat def 'should throw original error when retrying and failing'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, ACKNOWLEDGED, true) def originalException = new MongoSocketException('Some failure', new ServerAddress()) @@ -1173,7 +1175,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat @IgnoreIf({ serverVersionLessThan(3, 6) }) def 'should not request retryable write for multi updates or deletes'() { given: - def operation = new MixedBulkWriteOperation(getNamespace(), writes, true, ACKNOWLEDGED, true) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), writes, true, ACKNOWLEDGED, true) when: executeWithSession(operation, async) @@ -1221,7 +1223,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat .multi(true) .arrayFilters([BsonDocument.parse('{"i.b": 1}')]), ] - def operation = new MixedBulkWriteOperation(namespace, requests, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, requests, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1243,7 +1245,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: 
{"y.$[i].b": 2}}'), UPDATE) .arrayFilters([BsonDocument.parse('{"i.b": 3}')]) ] - def operation = new MixedBulkWriteOperation(namespace, requests, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, requests, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1262,7 +1264,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) .arrayFilters([BsonDocument.parse('{"i.b": 3}')]) ] - def operation = new MixedBulkWriteOperation(namespace, requests, true, UNACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, requests, true, UNACKNOWLEDGED, false) when: execute(operation, async) @@ -1281,7 +1283,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) .hint(BsonDocument.parse('{ _id: 1 }')) ] - def operation = new MixedBulkWriteOperation(namespace, requests, true, ACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, requests, true, ACKNOWLEDGED, false) when: execute(operation, async) @@ -1300,7 +1302,7 @@ class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecificat new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) .hintString('_id') ] - def operation = new MixedBulkWriteOperation(namespace, requests, true, UNACKNOWLEDGED, false) + def operation = new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, requests, true, UNACKNOWLEDGED, false) when: execute(operation, async) diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy 
index 56c0029786c..d345cf421f9 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy @@ -25,6 +25,8 @@ import org.bson.Document import org.bson.codecs.DocumentCodec import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT import static com.mongodb.ClusterFixture.executeAsync import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -35,61 +37,43 @@ import static com.mongodb.ClusterFixture.serverVersionLessThan class RenameCollectionOperationSpecification extends OperationFunctionalSpecification { def cleanup() { - new DropCollectionOperation(new MongoNamespace(getDatabaseName(), 'newCollection')).execute(getBinding()) + new DropCollectionOperation(CSOT_TIMEOUT.get(), new MongoNamespace(getDatabaseName(), 'newCollection'), + WriteConcern.ACKNOWLEDGED).execute(getBinding()) } def 'should return rename a collection'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) + def operation = new RenameCollectionOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + new MongoNamespace(getDatabaseName(), 'newCollection'), null) when: - new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection')).execute(getBinding()) + execute(operation, async) then: !collectionNameExists(getCollectionName()) collectionNameExists('newCollection') - } - - - def 'should return rename a collection asynchronously'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) - assert collectionNameExists(getCollectionName()) - - 
when: - executeAsync(new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection'))) - then: - !collectionNameExists(getCollectionName()) - collectionNameExists('newCollection') + where: + async << [true, false] } def 'should throw if not drop and collection exists'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) + def operation = new RenameCollectionOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), getNamespace(), null) when: - new RenameCollectionOperation(getNamespace(), getNamespace()).execute(getBinding()) + execute(operation, async) then: thrown(MongoServerException) collectionNameExists(getCollectionName()) - } - - def 'should throw if not drop and collection exists asynchronously'() { - given: - getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) - assert collectionNameExists(getCollectionName()) - - when: - executeAsync(new RenameCollectionOperation(getNamespace(), getNamespace())) - - then: - thrown(MongoServerException) - collectionNameExists(getCollectionName()) + where: + async << [true, false] } @IgnoreIf({ serverVersionLessThan(3, 4) || !isDiscoverableReplicaSet() }) @@ -97,8 +81,8 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) assert collectionNameExists(getCollectionName()) - def operation = new RenameCollectionOperation(getNamespace(), new MongoNamespace(getDatabaseName(), 'newCollection'), - new WriteConcern(5)) + def operation = new RenameCollectionOperation(CSOT_NO_TIMEOUT.get(), getNamespace(), + new MongoNamespace(getDatabaseName(), 'newCollection'), new WriteConcern(5)) when: async ? 
executeAsync(operation) : operation.execute(getBinding()) @@ -112,9 +96,8 @@ class RenameCollectionOperationSpecification extends OperationFunctionalSpecific async << [true, false] } - def collectionNameExists(String collectionName) { - def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding()) + def cursor = new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, new DocumentCodec()).execute(getBinding()) if (!cursor.hasNext()) { return false } diff --git a/driver-core/src/test/unit/com/mongodb/internal/ClientSideOperationTimeoutsTest.java b/driver-core/src/test/unit/com/mongodb/internal/ClientSideOperationTimeoutsTest.java new file mode 100644 index 00000000000..75553b1fc67 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/ClientSideOperationTimeoutsTest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal; + +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.TestFactory; + +import java.util.Collection; + +import static com.mongodb.ClusterFixture.sleep; +import static com.mongodb.internal.ClientSideOperationTimeouts.NO_TIMEOUT; +import static com.mongodb.internal.ClientSideOperationTimeouts.create; +import static com.mongodb.internal.ClientSideOperationTimeouts.withMaxCommitMS; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.DynamicTest.dynamicTest; + +final class ClientSideOperationTimeoutsTest { + + + @TestFactory + Collection clientSideOperationTimeoutsTest() { + return asList( + dynamicTest("test defaults", () -> { + ClientSideOperationTimeout clientSideOperationTimeout = NO_TIMEOUT; + assertAll( + () -> assertFalse(clientSideOperationTimeout.hasTimeoutMS()), + () -> assertEquals(0, clientSideOperationTimeout.getMaxTimeMS()), + () -> assertEquals(0, clientSideOperationTimeout.getMaxAwaitTimeMS()), + () -> assertEquals(0, clientSideOperationTimeout.getMaxCommitTimeMS()) + ); + }), + dynamicTest("Uses timeoutMS if set", () -> { + long altTimeout = 9; + ClientSideOperationTimeout clientSideOperationTimeout = create(99999999L, altTimeout, altTimeout, altTimeout); + assertAll( + () -> assertTrue(clientSideOperationTimeout.hasTimeoutMS()), + () -> assertTrue(clientSideOperationTimeout.getMaxTimeMS() > 0), + () -> assertTrue(clientSideOperationTimeout.getMaxAwaitTimeMS() > 0), + () -> assertTrue(clientSideOperationTimeout.getMaxCommitTimeMS() > 0) + ); + }), + dynamicTest("MaxTimeMS set", () -> { + ClientSideOperationTimeout clientSideOperationTimeout = create(null, 9); + assertAll( + () -> assertEquals(9, 
clientSideOperationTimeout.getMaxTimeMS()), + () -> assertEquals(0, clientSideOperationTimeout.getMaxAwaitTimeMS()), + () -> assertEquals(0, clientSideOperationTimeout.getMaxCommitTimeMS()) + ); + }), + dynamicTest("MaxTimeMS and MaxAwaitTimeMS set", () -> { + ClientSideOperationTimeout clientSideOperationTimeout = create(null, 9, 99); + assertAll( + () -> assertEquals(9, clientSideOperationTimeout.getMaxTimeMS()), + () -> assertEquals(99, clientSideOperationTimeout.getMaxAwaitTimeMS()), + () -> assertEquals(0, clientSideOperationTimeout.getMaxCommitTimeMS()) + ); + }), + dynamicTest("MaxCommitTimeMS set", () -> { + ClientSideOperationTimeout clientSideOperationTimeout = withMaxCommitMS(null, 9L); + assertAll( + () -> assertEquals(0, clientSideOperationTimeout.getMaxTimeMS()), + () -> assertEquals(0, clientSideOperationTimeout.getMaxAwaitTimeMS()), + () -> assertEquals(9L, clientSideOperationTimeout.getMaxCommitTimeMS()) + ); + }), + dynamicTest("All deprecated options set", () -> { + ClientSideOperationTimeout clientSideOperationTimeout = create(null, 99, 9L, 999); + assertAll( + () -> assertEquals(9, clientSideOperationTimeout.getMaxAwaitTimeMS()), + () -> assertEquals(99, clientSideOperationTimeout.getMaxTimeMS()), + () -> assertEquals(999, clientSideOperationTimeout.getMaxCommitTimeMS()) + ); + }), + dynamicTest("Use timeout if available or the alternative", () -> assertAll( + () -> assertEquals(99L, NO_TIMEOUT.timeoutOrAlternative(99)), + () -> assertEquals(0L, ClientSideOperationTimeouts.create(0L).timeoutOrAlternative(99)), + () -> assertTrue(ClientSideOperationTimeouts.create(999L).timeoutOrAlternative(0) <= 999), + () -> assertTrue(ClientSideOperationTimeouts.create(999L).timeoutOrAlternative(999999) <= 999) + )), + dynamicTest("Calculate min works as expected", () -> assertAll( + () -> assertEquals(99L, NO_TIMEOUT.calculateMin(99)), + () -> assertEquals(99L, ClientSideOperationTimeouts.create(0L).calculateMin(99)), + () -> 
assertTrue(ClientSideOperationTimeouts.create(999L).calculateMin(0) <= 999), + () -> assertTrue(ClientSideOperationTimeouts.create(999L).calculateMin(999999) <= 999) + )), + dynamicTest("Expired works as expected", () -> { + ClientSideOperationTimeout smallTimeout = ClientSideOperationTimeouts.create(1L); + ClientSideOperationTimeout longTimeout = ClientSideOperationTimeouts.create(999999999L); + ClientSideOperationTimeout noTimeout = NO_TIMEOUT; + sleep(100); + assertAll( + () -> assertFalse(noTimeout.expired()), + () -> assertFalse(longTimeout.expired()), + () -> assertTrue(smallTimeout.expired()) + ); + }) + ); + } + + private ClientSideOperationTimeoutsTest() { + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java index a596b637735..4a01a837f76 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java @@ -81,6 +81,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.assertions.Assertions.assertFalse; import static java.lang.String.format; import static org.junit.Assert.assertEquals; @@ -474,7 +475,8 @@ private Event getNextEvent(final Iterator eventsIterator, final } private static void executeAdminCommand(final BsonDocument command) { - new CommandReadOperation<>("admin", command, new BsonDocumentCodec()).execute(ClusterFixture.getBinding()); + new CommandReadOperation<>(CSOT_NO_TIMEOUT.get(), "admin", command, new BsonDocumentCodec()) + .execute(ClusterFixture.getBinding()); } private void setFailPoint() { diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy 
b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy index f897413e12d..d0c911eb144 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy @@ -36,6 +36,7 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.Decoder import spock.lang.Specification +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ReadPreference.primary import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync @@ -54,7 +55,7 @@ class AsyncOperationHelperSpecification extends Specification { getMaxWireVersion() >> getMaxWireVersionForServerVersion([4, 0, 0]) getServerType() >> ServerType.REPLICA_SET_PRIMARY } - def commandCreator = { serverDesc, connectionDesc -> command } + def commandCreator = { csot, serverDesc, connectionDesc -> command } def callback = new SingleResultCallback() { def result def throwable @@ -89,7 +90,7 @@ class AsyncOperationHelperSpecification extends Specification { } when: - executeRetryableWriteAsync(asyncWriteBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder, + executeRetryableWriteAsync(CSOT_NO_TIMEOUT.get(), asyncWriteBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder, commandCreator, FindAndModifyHelper.asyncTransformer(), { cmd -> cmd }, callback) then: @@ -129,7 +130,7 @@ class AsyncOperationHelperSpecification extends Specification { given: def dbName = 'db' def command = new BsonDocument('fakeCommandName', BsonNull.VALUE) - def commandCreator = { serverDescription, connectionDescription -> command } + def commandCreator = { csot, serverDescription, connectionDescription -> command } def decoder = Stub(Decoder) def callback = Stub(SingleResultCallback) def function = 
Stub(CommandReadTransformerAsync) @@ -146,7 +147,7 @@ class AsyncOperationHelperSpecification extends Specification { def connectionDescription = Stub(ConnectionDescription) when: - executeRetryableReadAsync(asyncReadBinding, dbName, commandCreator, decoder, function, false, callback) + executeRetryableReadAsync(CSOT_NO_TIMEOUT.get(), asyncReadBinding, dbName, commandCreator, decoder, function, false, callback) then: _ * connection.getDescription() >> connectionDescription diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy index dc17329ae91..7eae3616a9e 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy @@ -24,6 +24,8 @@ import com.mongodb.internal.binding.AsyncWriteBinding import com.mongodb.internal.binding.WriteBinding import com.mongodb.internal.session.SessionContext +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT + class CommitTransactionOperationUnitSpecification extends OperationUnitSpecification { def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException'() { given: @@ -33,7 +35,7 @@ class CommitTransactionOperationUnitSpecification extends OperationUnitSpecifica hasActiveTransaction() >> true } } - def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED) + def operation = new CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), WriteConcern.ACKNOWLEDGED) when: operation.execute(writeBinding) @@ -53,7 +55,7 @@ class CommitTransactionOperationUnitSpecification extends OperationUnitSpecifica hasActiveTransaction() >> true } } - def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED) + def operation = new 
CommitTransactionOperation(CSOT_NO_TIMEOUT.get(), WriteConcern.ACKNOWLEDGED) def callback = new FutureResultCallback() when: diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy index 0128b4158ee..4778ef5b6ce 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy @@ -27,21 +27,23 @@ import org.bson.Document import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.DocumentCodec +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CursorType.TailableAwait -import static java.util.concurrent.TimeUnit.MILLISECONDS +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME class FindOperationUnitSpecification extends OperationUnitSpecification { def 'should find with correct command'() { when: - def operation = new FindOperation(namespace, new BsonDocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, new BsonDocumentCodec()) def expectedCommand = new BsonDocument('find', new BsonString(namespace.getCollectionName())) then: testOperation(operation, [3, 2, 0], expectedCommand, async, commandResult) // Overrides when: - operation.filter(new BsonDocument('a', BsonBoolean.TRUE)) + operation = new FindOperation(CSOT_MAX_TIME.get(), namespace, new BsonDocumentCodec()) + .filter(new BsonDocument('a', BsonBoolean.TRUE)) .projection(new BsonDocument('x', new BsonInt32(1))) .skip(2) .limit(limit) @@ -51,7 +53,6 @@ class FindOperationUnitSpecification extends OperationUnitSpecification { .noCursorTimeout(true) .partial(true) .oplogReplay(true) - .maxTime(10, MILLISECONDS) .comment(new BsonString('my comment')) .hint(BsonDocument.parse('{ hint : 1}')) .min(BsonDocument.parse('{ abc: 99 }')) @@ -71,7 +72,7 @@ class 
FindOperationUnitSpecification extends OperationUnitSpecification { .append('allowPartialResults', BsonBoolean.TRUE) .append('noCursorTimeout', BsonBoolean.TRUE) .append('oplogReplay', BsonBoolean.TRUE) - .append('maxTimeMS', new BsonInt64(operation.getMaxTime(MILLISECONDS))) + .append('maxTimeMS', new BsonInt64(100)) .append('comment', operation.getComment()) .append('hint', operation.getHint()) .append('min', operation.getMin()) @@ -108,7 +109,7 @@ class FindOperationUnitSpecification extends OperationUnitSpecification { def 'should use the readPreference to set secondaryOk for commands'() { when: - def operation = new FindOperation(namespace, new DocumentCodec()) + def operation = new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, new DocumentCodec()) then: testOperationSecondaryOk(operation, [3, 2, 0], readPreference, async, commandResult) diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy index a18148911bf..13add5121a1 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy @@ -35,6 +35,7 @@ import org.bson.codecs.BsonDocumentCodec import org.bson.codecs.Decoder import spock.lang.Specification +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ReadPreference.primary import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer @@ -75,7 +76,7 @@ class SyncOperationHelperSpecification extends Specification { given: def dbName = 'db' def command = BsonDocument.parse('''{findAndModify: "coll", query: {a: 1}, new: false, update: {$inc: {a :1}}, txnNumber: 1}''') - def commandCreator = { 
serverDescription, connectionDescription -> command } + def commandCreator = { csot, serverDescription, connectionDescription -> command } def decoder = new BsonDocumentCodec() def results = [ BsonDocument.parse('{ok: 1.0, writeConcernError: {code: 91, errmsg: "Replication is being shut down"}}'), @@ -104,8 +105,8 @@ class SyncOperationHelperSpecification extends Specification { } when: - executeRetryableWrite(writeBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder, commandCreator, - FindAndModifyHelper.transformer()) { cmd -> cmd } + executeRetryableWrite(CSOT_NO_TIMEOUT.get(), writeBinding, dbName, primary(), new NoOpFieldNameValidator(), decoder, commandCreator, + FindAndModifyHelper.transformer()) { cmd -> cmd } then: 2 * connection.command(dbName, command, _, primary(), decoder, writeBinding) >> { results.poll() } @@ -119,7 +120,7 @@ class SyncOperationHelperSpecification extends Specification { given: def dbName = 'db' def command = new BsonDocument('fakeCommandName', BsonNull.VALUE) - def commandCreator = { serverDescription, connectionDescription -> command } + def commandCreator = { csot, serverDescription, connectionDescription -> command } def decoder = Stub(Decoder) def function = Stub(CommandReadTransformer) def connection = Mock(Connection) @@ -134,7 +135,7 @@ class SyncOperationHelperSpecification extends Specification { def connectionDescription = Stub(ConnectionDescription) when: - executeRetryableRead(readBinding, dbName, commandCreator, decoder, function, false) + executeRetryableRead(CSOT_NO_TIMEOUT.get(), readBinding, dbName, commandCreator, decoder, function, false) then: _ * connection.getDescription() >> connectionDescription diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt index 756c884608a..fa26fae86c1 100644 --- 
a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt @@ -55,6 +55,7 @@ import com.mongodb.client.result.InsertManyResult import com.mongodb.client.result.InsertOneResult import com.mongodb.client.result.UpdateResult import com.mongodb.kotlin.client.coroutine.MongoCollection +import java.util.concurrent.TimeUnit import kotlinx.coroutines.flow.toCollection import kotlinx.coroutines.runBlocking import org.bson.Document @@ -74,6 +75,7 @@ data class SyncMongoCollection(val wrapped: MongoCollection) : JMong override fun getWriteConcern(): WriteConcern = wrapped.writeConcern override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit) override fun withDocumentClass(clazz: Class): SyncMongoCollection = SyncMongoCollection(wrapped.withDocumentClass(clazz)) @@ -90,6 +92,9 @@ data class SyncMongoCollection(val wrapped: MongoCollection) : JMong override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection = SyncMongoCollection(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection = + SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit)) + override fun countDocuments(): Long = runBlocking { wrapped.countDocuments() } override fun countDocuments(filter: Bson): Long = runBlocking { wrapped.countDocuments(filter) } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt index 0fb12bddc70..a60256491c6 100644 --- 
a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt @@ -28,6 +28,7 @@ import com.mongodb.client.MongoIterable import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.CreateViewOptions import com.mongodb.kotlin.client.coroutine.MongoDatabase +import java.util.concurrent.TimeUnit import kotlinx.coroutines.runBlocking import org.bson.Document import org.bson.codecs.configuration.CodecRegistry @@ -44,6 +45,8 @@ data class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit) + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase = SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) @@ -56,6 +59,9 @@ data class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase = SyncMongoDatabase(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + override fun getCollection(collectionName: String): MongoCollection = SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java)) diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt index b1026c359f9..943741a7283 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt @@ -87,6 +87,27 @@ public class 
MongoCollection(private val wrapped: JMongoCollection) public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 4.x + */ + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new collection instance with a different default class to cast any documents returned from the database * into. @@ -150,6 +171,21 @@ public class MongoCollection(private val wrapped: JMongoCollection) public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection = MongoCollection(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. 
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoCollection instance with the set time limit for operations + * @see [MongoCollection.timeout] + * @since 4.x + */ + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection = + MongoCollection(wrapped.withTimeout(timeout, timeUnit)) + /** * Counts the number of documents in the collection. * diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt index 974533be7f5..133d5f7b6ed 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt @@ -57,6 +57,27 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). 
+ * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 4.x + */ + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -100,6 +121,21 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase = MongoDatabase(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoDatabase instance with the set time limit for operations + * @see [MongoDatabase.timeout] + * @since 4.x + */ + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase = + MongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + /** * Gets a collection. 
* diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt index e8e121f85dc..7be5c068a84 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt @@ -72,7 +72,16 @@ class MongoCollectionTest { fun shouldHaveTheSameMethods() { val jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet() val kMongoCollectionFunctions = - MongoCollection::class.declaredFunctions.map { it.name }.toSet() + + MongoCollection::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoCollection::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt index 4ba7502bd24..031e2e6d1ef 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt @@ -54,7 +54,16 @@ class MongoDatabaseTest { fun shouldHaveTheSameMethods() { val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet() val kMongoDatabaseFunctions = - MongoDatabase::class.declaredFunctions.map { it.name }.toSet() + + MongoDatabase::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoDatabase::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git 
a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt index 952b05d32e5..51c3a7db7e1 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt @@ -56,6 +56,7 @@ import com.mongodb.client.result.InsertOneResult import com.mongodb.client.result.UpdateResult import com.mongodb.kotlin.client.MongoCollection import java.lang.UnsupportedOperationException +import java.util.concurrent.TimeUnit import org.bson.Document import org.bson.codecs.configuration.CodecRegistry import org.bson.conversions.Bson @@ -73,6 +74,7 @@ internal class SyncMongoCollection(val wrapped: MongoCollection) : J override fun getWriteConcern(): WriteConcern = wrapped.writeConcern override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? 
= wrapped.timeout(timeUnit) override fun withDocumentClass(clazz: Class): SyncMongoCollection = SyncMongoCollection(wrapped.withDocumentClass(clazz)) @@ -89,6 +91,9 @@ internal class SyncMongoCollection(val wrapped: MongoCollection) : J override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection = SyncMongoCollection(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection = + SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit)) + override fun countDocuments(): Long = wrapped.countDocuments() override fun countDocuments(filter: Bson): Long = wrapped.countDocuments(filter) diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt index 20b0051488f..8f4476dc5fe 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt @@ -28,6 +28,7 @@ import com.mongodb.client.MongoIterable import com.mongodb.client.model.CreateCollectionOptions import com.mongodb.client.model.CreateViewOptions import com.mongodb.kotlin.client.MongoDatabase +import java.util.concurrent.TimeUnit import org.bson.Document import org.bson.codecs.configuration.CodecRegistry import org.bson.conversions.Bson @@ -43,6 +44,8 @@ internal class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? 
= wrapped.timeout(timeUnit) + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase = SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) @@ -55,6 +58,9 @@ internal class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase = SyncMongoDatabase(wrapped.withReadConcern(readConcern)) + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + override fun getCollection(collectionName: String): MongoCollection = SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java)) diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt index 1529af7eaba..78f794fa0ec 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt @@ -84,6 +84,27 @@ public class MongoCollection(private val wrapped: JMongoCollection) public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. 
+ * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 4.x + */ + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new collection instance with a different default class to cast any documents returned from the database * into. @@ -147,6 +168,21 @@ public class MongoCollection(private val wrapped: JMongoCollection) public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection = MongoCollection(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoCollection instance with the set time limit for operations + * @see [MongoCollection.timeout] + * @since 4.x + */ + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection = + MongoCollection(wrapped.withTimeout(timeout, timeUnit)) + /** * Counts the number of documents in the collection. 
* diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt index 6ddfbd2c652..8c1cdbd5cf2 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt @@ -53,6 +53,27 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public val writeConcern: WriteConcern get() = wrapped.writeConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 4.x + */ + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + /** * Create a new MongoDatabase instance with a different codec registry. 
* @@ -96,6 +117,20 @@ public class MongoDatabase(private val wrapped: JMongoDatabase) { public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase = MongoDatabase(wrapped.withWriteConcern(newWriteConcern)) + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoDatabase instance with the set time limit for operations + * @see [MongoDatabase.timeout] + * @since 4.x + */ + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase = + MongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + /** * Gets a collection. * diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt index d458c9302ce..e27b7852bba 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt @@ -71,7 +71,16 @@ class MongoCollectionTest { fun shouldHaveTheSameMethods() { val jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet() - "mapReduce" val kMongoCollectionFunctions = - MongoCollection::class.declaredFunctions.map { it.name }.toSet() + + MongoCollection::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoCollection::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt 
index 6a7264545dc..1a7bc1d25c2 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt @@ -52,7 +52,16 @@ class MongoDatabaseTest { fun shouldHaveTheSameMethods() { val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet() val kMongoDatabaseFunctions = - MongoDatabase::class.declaredFunctions.map { it.name }.toSet() + + MongoDatabase::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + MongoDatabase::class .declaredMemberProperties .filterNot { it.name == "wrapped" } diff --git a/driver-legacy/src/main/com/mongodb/DB.java b/driver-legacy/src/main/com/mongodb/DB.java index df3a7b41076..6b199ea0c9a 100644 --- a/driver-legacy/src/main/com/mongodb/DB.java +++ b/driver-legacy/src/main/com/mongodb/DB.java @@ -23,6 +23,7 @@ import com.mongodb.client.model.DBCreateViewOptions; import com.mongodb.client.model.ValidationAction; import com.mongodb.client.model.ValidationLevel; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.operation.BatchCursor; import com.mongodb.internal.operation.CommandReadOperation; import com.mongodb.internal.operation.CreateCollectionOperation; @@ -195,7 +196,8 @@ public DBCollection getCollection(final String name) { */ public void dropDatabase() { try { - getExecutor().execute(new DropDatabaseOperation(getName(), getWriteConcern()), getReadConcern()); + getExecutor().execute(new DropDatabaseOperation(ClientSideOperationTimeouts.create(getTimeoutMS()), + getName(), getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -220,11 +222,11 @@ public String getName() { public Set getCollectionNames() { List collectionNames = new MongoIterableImpl(null, executor, ReadConcern.DEFAULT, primary(), - 
mongo.getMongoClientOptions().getRetryReads()) { + mongo.getMongoClientOptions().getRetryReads(), null) { @Override public ReadOperation> asReadOperation() { - return new ListCollectionsOperation<>(name, commandCodec) - .nameOnly(true); + return new ListCollectionsOperation<>(ClientSideOperationTimeouts.create(DB.this.getTimeoutMS()), + name, commandCodec).nameOnly(true); } }.map(result -> (String) result.get("name")).into(new ArrayList<>()); Collections.sort(collectionNames); @@ -304,8 +306,9 @@ public DBCollection createView(final String viewName, final String viewOn, final try { notNull("options", options); DBCollection view = getCollection(viewName); - executor.execute(new CreateViewOperation(name, viewName, viewOn, view.preparePipeline(pipeline), writeConcern) - .collation(options.getCollation()), getReadConcern()); + executor.execute(new CreateViewOperation(ClientSideOperationTimeouts.create(getTimeoutMS()), name, viewName, viewOn, + view.preparePipeline(pipeline), writeConcern) + .collation(options.getCollation()), getReadConcern()); return view; } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); @@ -380,7 +383,8 @@ private CreateCollectionOperation getCreateCollectionOperation(final String coll validationAction = ValidationAction.fromString((String) options.get("validationAction")); } Collation collation = DBObjectCollationHelper.createCollationFromOptions(options); - return new CreateCollectionOperation(getName(), collectionName, getWriteConcern()) + return new CreateCollectionOperation(ClientSideOperationTimeouts.create(getTimeoutMS()), getName(), collectionName, + getWriteConcern()) .capped(capped) .collation(collation) .sizeInBytes(sizeInBytes) @@ -513,7 +517,8 @@ public String toString() { } CommandResult executeCommand(final BsonDocument commandDocument, final ReadPreference readPreference) { - return new CommandResult(executor.execute(new CommandReadOperation<>(getName(), commandDocument, + return new 
CommandResult(executor.execute( + new CommandReadOperation<>(ClientSideOperationTimeouts.create(getTimeoutMS()), getName(), commandDocument, new BsonDocumentCodec()), readPreference, getReadConcern()), getDefaultDBObjectCodec()); } @@ -561,6 +566,11 @@ Codec getDefaultDBObjectCodec() { .withUuidRepresentation(getMongoClient().getMongoClientOptions().getUuidRepresentation()); } + @Nullable + Long getTimeoutMS() { + return null; // TODO (CSOT) - JAVA-4064 + } + private static final Set OBEDIENT_COMMANDS = new HashSet<>(); static { diff --git a/driver-legacy/src/main/com/mongodb/DBCollection.java b/driver-legacy/src/main/com/mongodb/DBCollection.java index 7489fa21faa..5dcc2cd544b 100644 --- a/driver-legacy/src/main/com/mongodb/DBCollection.java +++ b/driver-legacy/src/main/com/mongodb/DBCollection.java @@ -26,6 +26,8 @@ import com.mongodb.client.model.DBCollectionFindOptions; import com.mongodb.client.model.DBCollectionRemoveOptions; import com.mongodb.client.model.DBCollectionUpdateOptions; +import com.mongodb.internal.ClientSideOperationTimeout; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.bulk.DeleteRequest; import com.mongodb.internal.bulk.IndexRequest; import com.mongodb.internal.bulk.InsertRequest; @@ -341,8 +343,8 @@ private Encoder toEncoder(@Nullable final DBEncoder dbEncoder) { private WriteResult insert(final List insertRequestList, final WriteConcern writeConcern, final boolean continueOnError, @Nullable final Boolean bypassDocumentValidation) { - return executeWriteOperation(createBulkWriteOperationForInsert(getNamespace(), !continueOnError, writeConcern, - retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation)); + return executeWriteOperation(createBulkWriteOperationForInsert(getClientSideOperationTimeout(), getNamespace(), + !continueOnError, writeConcern, retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation)); } WriteResult executeWriteOperation(final 
LegacyMixedBulkWriteOperation operation) { @@ -425,8 +427,8 @@ private WriteResult replaceOrInsert(final DBObject obj, final Object id, final W UpdateRequest replaceRequest = new UpdateRequest(wrap(filter), wrap(obj, objectCodec), Type.REPLACE).upsert(true); - return executeWriteOperation(createBulkWriteOperationForReplace(getNamespace(), false, writeConcern, retryWrites, - singletonList(replaceRequest))); + return executeWriteOperation(createBulkWriteOperationForReplace(getClientSideOperationTimeout(), getNamespace(), false, + writeConcern, retryWrites, singletonList(replaceRequest))); } /** @@ -578,8 +580,10 @@ public WriteResult update(final DBObject query, final DBObject update, final DBC .collation(options.getCollation()) .arrayFilters(wrapAllowNull(options.getArrayFilters(), options.getEncoder())); LegacyMixedBulkWriteOperation operation = (updateType == UPDATE - ? createBulkWriteOperationForUpdate(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest)) - : createBulkWriteOperationForReplace(getNamespace(), true, writeConcern, retryWrites, singletonList(updateRequest))) + ? createBulkWriteOperationForUpdate(getClientSideOperationTimeout(), getNamespace(), true, writeConcern, retryWrites, + singletonList(updateRequest)) + : createBulkWriteOperationForReplace(getClientSideOperationTimeout(), getNamespace(), true, writeConcern, retryWrites, + singletonList(updateRequest))) .bypassDocumentValidation(options.getBypassDocumentValidation()); return executeWriteOperation(operation); } @@ -651,8 +655,8 @@ public WriteResult remove(final DBObject query, final DBCollectionRemoveOptions WriteConcern optionsWriteConcern = options.getWriteConcern(); WriteConcern writeConcern = optionsWriteConcern != null ? 
optionsWriteConcern : getWriteConcern(); DeleteRequest deleteRequest = new DeleteRequest(wrap(query, options.getEncoder())).collation(options.getCollation()); - return executeWriteOperation(createBulkWriteOperationForDelete(getNamespace(), false, writeConcern, retryWrites, - singletonList(deleteRequest))); + return executeWriteOperation(createBulkWriteOperationForDelete(getClientSideOperationTimeout(), getNamespace(), false, + writeConcern, retryWrites, singletonList(deleteRequest))); } /** @@ -909,12 +913,12 @@ public long getCount(@Nullable final DBObject query) { */ public long getCount(@Nullable final DBObject query, final DBCollectionCountOptions options) { notNull("countOptions", options); - CountOperation operation = new CountOperation(getNamespace()) - .skip(options.getSkip()) - .limit(options.getLimit()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .collation(options.getCollation()) - .retryReads(retryReads); + CountOperation operation = new CountOperation( + getClientSideOperationTimeout(options.getMaxTime(MILLISECONDS)), getNamespace()) + .skip(options.getSkip()) + .limit(options.getLimit()) + .collation(options.getCollation()) + .retryReads(retryReads); if (query != null) { operation.filter(wrap(query)); } @@ -956,9 +960,9 @@ public DBCollection rename(final String newName) { */ public DBCollection rename(final String newName, final boolean dropTarget) { try { - executor.execute(new RenameCollectionOperation(getNamespace(), - new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern()) - .dropTarget(dropTarget), getReadConcern()); + executor.execute(new RenameCollectionOperation(getClientSideOperationTimeout(), getNamespace(), + new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern()) + .dropTarget(dropTarget), getReadConcern()); return getDB().getCollection(newName); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); @@ -1025,12 +1029,12 @@ public List 
distinct(final String fieldName, final DBObject query, final ReadPre public List distinct(final String fieldName, final DBCollectionDistinctOptions options) { notNull("fieldName", fieldName); return new MongoIterableImpl(null, executor, - options.getReadConcern() != null ? options.getReadConcern() : getReadConcern(), - options.getReadPreference() != null ? options.getReadPreference() : getReadPreference(), - retryReads) { + options.getReadConcern() != null ? options.getReadConcern() : getReadConcern(), + options.getReadPreference() != null ? options.getReadPreference() : getReadPreference(), + retryReads, null) { @Override public ReadOperation> asReadOperation() { - return new DistinctOperation<>(getNamespace(), fieldName, new BsonValueCodec()) + return new DistinctOperation<>(getClientSideOperationTimeout(), getNamespace(), fieldName, new BsonValueCodec()) .filter(wrapAllowNull(options.getFilter())) .collation(options.getCollation()) .retryReads(retryReads); @@ -1112,16 +1116,15 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { Boolean jsMode = command.getJsMode(); if (command.getOutputType() == MapReduceCommand.OutputType.INLINE) { - MapReduceWithInlineResultsOperation operation = - new MapReduceWithInlineResultsOperation<>(getNamespace(), new BsonJavaScript(command.getMap()), - new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec()) - .filter(wrapAllowNull(command.getQuery())) - .limit(command.getLimit()) - .maxTime(command.getMaxTime(MILLISECONDS), MILLISECONDS) - .jsMode(jsMode != null && jsMode) - .sort(wrapAllowNull(command.getSort())) - .verbose(command.isVerbose()) - .collation(command.getCollation()); + MapReduceWithInlineResultsOperation operation = new MapReduceWithInlineResultsOperation<>( + getClientSideOperationTimeout(command.getMaxTime(MILLISECONDS)), getNamespace(), new BsonJavaScript(command.getMap()), + new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec()) + .filter(wrapAllowNull(command.getQuery())) 
+ .limit(command.getLimit()) + .jsMode(jsMode != null && jsMode) + .sort(wrapAllowNull(command.getSort())) + .verbose(command.isVerbose()) + .collation(command.getCollation()); if (scope != null) { operation.scope(wrap(new BasicDBObject(scope))); @@ -1148,14 +1151,12 @@ public MapReduceOutput mapReduce(final MapReduceCommand command) { } MapReduceToCollectionOperation operation = - new MapReduceToCollectionOperation(getNamespace(), - new BsonJavaScript(command.getMap()), - new BsonJavaScript(command.getReduce()), - command.getOutputTarget(), - getWriteConcern()) + new MapReduceToCollectionOperation( + getClientSideOperationTimeout(command.getMaxTime(MILLISECONDS)), + getNamespace(), new BsonJavaScript(command.getMap()), new BsonJavaScript(command.getReduce()), + command.getOutputTarget(), getWriteConcern()) .filter(wrapAllowNull(command.getQuery())) .limit(command.getLimit()) - .maxTime(command.getMaxTime(MILLISECONDS), MILLISECONDS) .jsMode(jsMode != null && jsMode) .sort(wrapAllowNull(command.getSort())) .verbose(command.isVerbose()) @@ -1221,12 +1222,13 @@ public Cursor aggregate(final List pipeline, final Aggregati BsonValue outCollection = stages.get(stages.size() - 1).get("$out"); if (outCollection != null) { - AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), stages, - getReadConcern(), getWriteConcern()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .allowDiskUse(options.getAllowDiskUse()) - .bypassDocumentValidation(options.getBypassDocumentValidation()) - .collation(options.getCollation()); + AggregateToCollectionOperation operation = + new AggregateToCollectionOperation( + getClientSideOperationTimeout(options.getMaxTime(MILLISECONDS)), + getNamespace(), stages, getReadConcern(), getWriteConcern()) + .allowDiskUse(options.getAllowDiskUse()) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .collation(options.getCollation()); try { executor.execute(operation, 
getReadPreference(), getReadConcern()); result = new DBCursor(database.getCollection(outCollection.asString().getValue()), new BasicDBObject(), @@ -1235,8 +1237,9 @@ public Cursor aggregate(final List pipeline, final Aggregati throw createWriteConcernException(e); } } else { - AggregateOperation operation = new AggregateOperation<>(getNamespace(), stages, getDefaultDBObjectCodec()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) + AggregateOperation operation = new AggregateOperation<>( + getClientSideOperationTimeout(options.getMaxTime(MILLISECONDS)), getNamespace(), stages, + getDefaultDBObjectCodec()) .allowDiskUse(options.getAllowDiskUse()) .batchSize(options.getBatchSize()) .collation(options.getCollation()) @@ -1258,12 +1261,12 @@ public Cursor aggregate(final List pipeline, final Aggregati * @mongodb.server.release 3.6 */ public CommandResult explainAggregate(final List pipeline, final AggregationOptions options) { - AggregateOperation operation = new AggregateOperation<>(getNamespace(), preparePipeline(pipeline), - new BsonDocumentCodec()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) - .allowDiskUse(options.getAllowDiskUse()) - .collation(options.getCollation()) - .retryReads(retryReads); + AggregateOperation operation = new AggregateOperation<>( + getClientSideOperationTimeout(options.getMaxTime(MILLISECONDS)), getNamespace(), + preparePipeline(pipeline), new BsonDocumentCodec()) + .allowDiskUse(options.getAllowDiskUse()) + .collation(options.getCollation()) + .retryReads(retryReads); return new CommandResult(executor.execute(operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()), primaryPreferred(), getReadConcern()), getDefaultDBObjectCodec()); } @@ -1648,12 +1651,12 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod WriteConcern optionsWriteConcern = options.getWriteConcern(); WriteConcern writeConcern = optionsWriteConcern != null ? 
optionsWriteConcern : getWriteConcern(); WriteOperation operation; + ClientSideOperationTimeout clientSideOperationTimeout = getClientSideOperationTimeout(options.getMaxTime(MILLISECONDS)); if (options.isRemove()) { - operation = new FindAndDeleteOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec) + operation = new FindAndDeleteOperation<>(clientSideOperationTimeout, getNamespace(), writeConcern, retryWrites, objectCodec) .filter(wrapAllowNull(query)) .projection(wrapAllowNull(options.getProjection())) .sort(wrapAllowNull(options.getSort())) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .collation(options.getCollation()); } else { DBObject update = options.getUpdate(); @@ -1661,26 +1664,24 @@ public DBObject findAndModify(final DBObject query, final DBCollectionFindAndMod throw new IllegalArgumentException("update can not be null unless it's a remove"); } if (!update.keySet().isEmpty() && update.keySet().iterator().next().charAt(0) == '$') { - operation = new FindAndUpdateOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec, - wrap(update)) + operation = new FindAndUpdateOperation<>(clientSideOperationTimeout, getNamespace(), writeConcern, retryWrites, + objectCodec, wrap(update)) .filter(wrap(query)) .projection(wrapAllowNull(options.getProjection())) .sort(wrapAllowNull(options.getSort())) .returnOriginal(!options.returnNew()) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()) .arrayFilters(wrapAllowNull(options.getArrayFilters(), (Encoder) null)); } else { - operation = new FindAndReplaceOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec, - wrap(update)) + operation = new FindAndReplaceOperation<>(clientSideOperationTimeout, getNamespace(), writeConcern, retryWrites, + objectCodec, wrap(update)) .filter(wrap(query)) .projection(wrapAllowNull(options.getProjection())) 
.sort(wrapAllowNull(options.getSort())) .returnOriginal(!options.returnNew()) .upsert(options.isUpsert()) - .maxTime(options.getMaxTime(MILLISECONDS), MILLISECONDS) .bypassDocumentValidation(options.getBypassDocumentValidation()) .collation(options.getCollation()); } @@ -1787,7 +1788,8 @@ public ReadConcern getReadConcern() { */ public void drop() { try { - executor.execute(new DropCollectionOperation(getNamespace(), getWriteConcern()), getReadConcern()); + executor.execute(new DropCollectionOperation(getClientSideOperationTimeout(), getNamespace(), + getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -1851,10 +1853,12 @@ public synchronized void setDBEncoderFactory(@Nullable final DBEncoderFactory fa * @mongodb.driver.manual core/indexes/ Indexes */ public List getIndexInfo() { - return new MongoIterableImpl(null, executor, ReadConcern.DEFAULT, primary(), retryReads) { + return new MongoIterableImpl(null, executor, ReadConcern.DEFAULT, + primary(), retryReads, null) { @Override public ReadOperation> asReadOperation() { - return new ListIndexesOperation<>(getNamespace(), getDefaultDBObjectCodec()).retryReads(retryReads); + return new ListIndexesOperation<>(getClientSideOperationTimeout(), getNamespace(), + getDefaultDBObjectCodec()).retryReads(retryReads); } }.into(new ArrayList<>()); } @@ -1869,7 +1873,8 @@ public ReadOperation> asReadOperation() { */ public void dropIndex(final DBObject index) { try { - executor.execute(new DropIndexOperation(getNamespace(), wrap(index), getWriteConcern()), getReadConcern()); + executor.execute(new DropIndexOperation(getClientSideOperationTimeout(), getNamespace(), wrap(index), + getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -1884,7 +1889,8 @@ public void dropIndex(final DBObject index) { */ public void dropIndex(final String indexName) { try { - executor.execute(new 
DropIndexOperation(getNamespace(), indexName, getWriteConcern()), getReadConcern()); + executor.execute(new DropIndexOperation(getClientSideOperationTimeout(), getNamespace(), indexName, + getWriteConcern()), getReadConcern()); } catch (MongoWriteConcernException e) { throw createWriteConcernException(e); } @@ -2018,9 +2024,9 @@ BulkWriteResult executeBulkWriteOperation(final boolean ordered, final Boolean b final List writeRequests, final WriteConcern writeConcern) { try { - return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation(getNamespace(), - translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false) - .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec()); + return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation(getClientSideOperationTimeout(), + getNamespace(), translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false) + .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec()); } catch (MongoBulkWriteException e) { throw BulkWriteHelper.translateBulkWriteException(e, MongoClient.getDefaultCodecRegistry().get(DBObject.class)); } @@ -2134,7 +2140,7 @@ private CreateIndexesOperation createIndexOperation(final DBObject key, final DB if (options.containsField("collation")) { request.collation(DBObjectCollationHelper.createCollationFromOptions(options)); } - return new CreateIndexesOperation(getNamespace(), singletonList(request), writeConcern); + return new CreateIndexesOperation(getClientSideOperationTimeout(), getNamespace(), singletonList(request), writeConcern); } Codec getObjectCodec() { @@ -2195,6 +2201,19 @@ BsonDocument wrap(final DBObject document, @Nullable final Encoder enc } } + private ClientSideOperationTimeout getClientSideOperationTimeout(){ + return ClientSideOperationTimeouts.create(database.getTimeoutMS()); + } + + private ClientSideOperationTimeout getClientSideOperationTimeout(final long maxTimeMS){ + 
return ClientSideOperationTimeouts.create(database.getTimeoutMS(), maxTimeMS); + } + + ClientSideOperationTimeout getClientSideOperationTimeout(final long maxTimeMS, final long maxAwaitTimeMS){ + return ClientSideOperationTimeouts.create(database.getTimeoutMS(), maxTimeMS, maxAwaitTimeMS); + } + + static WriteConcernException createWriteConcernException(final MongoWriteConcernException e) { return new WriteConcernException(new BsonDocument("code", new BsonInt32(e.getWriteConcernError().getCode())) .append("errmsg", new BsonString(e.getWriteConcernError().getMessage())), diff --git a/driver-legacy/src/main/com/mongodb/DBCursor.java b/driver-legacy/src/main/com/mongodb/DBCursor.java index e9c210e0b1a..83e86a18c36 100644 --- a/driver-legacy/src/main/com/mongodb/DBCursor.java +++ b/driver-legacy/src/main/com/mongodb/DBCursor.java @@ -429,32 +429,31 @@ public DBCursor partial(final boolean partial) { @SuppressWarnings("deprecation") private FindOperation getQueryOperation(final Decoder decoder) { - - return new FindOperation<>(collection.getNamespace(), decoder) - .filter(collection.wrapAllowNull(filter)) - .batchSize(findOptions.getBatchSize()) - .skip(findOptions.getSkip()) - .limit(findOptions.getLimit()) - .maxAwaitTime(findOptions.getMaxAwaitTime(MILLISECONDS), MILLISECONDS) - .maxTime(findOptions.getMaxTime(MILLISECONDS), MILLISECONDS) - .projection(collection.wrapAllowNull(findOptions.getProjection())) - .sort(collection.wrapAllowNull(findOptions.getSort())) - .collation(findOptions.getCollation()) - .comment(findOptions.getComment() != null - ? new BsonString(findOptions.getComment()) : null) - .hint(findOptions.getHint() != null - ? collection.wrapAllowNull(findOptions.getHint()) - : (findOptions.getHintString() != null - ? 
new BsonString(findOptions.getHintString()) : null)) - .min(collection.wrapAllowNull(findOptions.getMin())) - .max(collection.wrapAllowNull(findOptions.getMax())) - .cursorType(findOptions.getCursorType()) - .noCursorTimeout(findOptions.isNoCursorTimeout()) - .oplogReplay(findOptions.isOplogReplay()) - .partial(findOptions.isPartial()) - .returnKey(findOptions.isReturnKey()) - .showRecordId(findOptions.isShowRecordId()) - .retryReads(retryReads); + return new FindOperation<>( + collection.getClientSideOperationTimeout(findOptions.getMaxTime(MILLISECONDS), findOptions.getMaxAwaitTime(MILLISECONDS)), + collection.getNamespace(), decoder) + .filter(collection.wrapAllowNull(filter)) + .batchSize(findOptions.getBatchSize()) + .skip(findOptions.getSkip()) + .limit(findOptions.getLimit()) + .projection(collection.wrapAllowNull(findOptions.getProjection())) + .sort(collection.wrapAllowNull(findOptions.getSort())) + .collation(findOptions.getCollation()) + .comment(findOptions.getComment() != null + ? new BsonString(findOptions.getComment()) : null) + .hint(findOptions.getHint() != null + ? collection.wrapAllowNull(findOptions.getHint()) + : (findOptions.getHintString() != null + ? 
new BsonString(findOptions.getHintString()) : null)) + .min(collection.wrapAllowNull(findOptions.getMin())) + .max(collection.wrapAllowNull(findOptions.getMax())) + .cursorType(findOptions.getCursorType()) + .noCursorTimeout(findOptions.isNoCursorTimeout()) + .oplogReplay(findOptions.isOplogReplay()) + .partial(findOptions.isPartial()) + .returnKey(findOptions.isReturnKey()) + .showRecordId(findOptions.isShowRecordId()) + .retryReads(retryReads); } /** diff --git a/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java b/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java index 4d8eb22cb7a..721e6ccfc7e 100644 --- a/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java +++ b/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java @@ -19,6 +19,7 @@ import com.mongodb.bulk.BulkWriteError; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.bulk.WriteConcernError; +import com.mongodb.internal.ClientSideOperationTimeout; import com.mongodb.internal.binding.WriteBinding; import com.mongodb.internal.bulk.DeleteRequest; import com.mongodb.internal.bulk.InsertRequest; @@ -47,6 +48,7 @@ * Operation for bulk writes for the legacy API. 
*/ final class LegacyMixedBulkWriteOperation implements WriteOperation { + private final ClientSideOperationTimeout clientSideOperationTimeout; private final WriteConcern writeConcern; private final MongoNamespace namespace; private final List writeRequests; @@ -55,31 +57,41 @@ final class LegacyMixedBulkWriteOperation implements WriteOperation insertRequests) { - return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, insertRequests, INSERT); + static LegacyMixedBulkWriteOperation createBulkWriteOperationForInsert(final ClientSideOperationTimeout clientSideOperationTimeout, + final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, + final List insertRequests) { + return new LegacyMixedBulkWriteOperation(clientSideOperationTimeout, namespace, ordered, writeConcern, retryWrites, insertRequests, + INSERT); } - static LegacyMixedBulkWriteOperation createBulkWriteOperationForUpdate(final MongoNamespace namespace, final boolean ordered, - final WriteConcern writeConcern, final boolean retryWrites, final List updateRequests) { + static LegacyMixedBulkWriteOperation createBulkWriteOperationForUpdate(final ClientSideOperationTimeout clientSideOperationTimeout, + final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, + final List updateRequests) { assertTrue(updateRequests.stream().allMatch(updateRequest -> updateRequest.getType() == UPDATE)); - return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, updateRequests, UPDATE); + return new LegacyMixedBulkWriteOperation(clientSideOperationTimeout, namespace, ordered, writeConcern, retryWrites, updateRequests, + UPDATE); } - static LegacyMixedBulkWriteOperation createBulkWriteOperationForReplace(final MongoNamespace namespace, final boolean ordered, - final WriteConcern writeConcern, final boolean retryWrites, final List replaceRequests) { + static 
LegacyMixedBulkWriteOperation createBulkWriteOperationForReplace(final ClientSideOperationTimeout clientSideOperationTimeout, + final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, + final List replaceRequests) { assertTrue(replaceRequests.stream().allMatch(updateRequest -> updateRequest.getType() == REPLACE)); - return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, replaceRequests, REPLACE); + return new LegacyMixedBulkWriteOperation(clientSideOperationTimeout, namespace, ordered, writeConcern, retryWrites, replaceRequests, + REPLACE); } - static LegacyMixedBulkWriteOperation createBulkWriteOperationForDelete(final MongoNamespace namespace, final boolean ordered, - final WriteConcern writeConcern, final boolean retryWrites, final List deleteRequests) { - return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, deleteRequests, DELETE); + static LegacyMixedBulkWriteOperation createBulkWriteOperationForDelete(final ClientSideOperationTimeout clientSideOperationTimeout, + final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, + final List deleteRequests) { + return new LegacyMixedBulkWriteOperation(clientSideOperationTimeout, namespace, ordered, writeConcern, retryWrites, deleteRequests, + DELETE); } - private LegacyMixedBulkWriteOperation(final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, + private LegacyMixedBulkWriteOperation(final ClientSideOperationTimeout clientSideOperationTimeout, final MongoNamespace namespace, + final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites, final List writeRequests, final WriteRequest.Type type) { isTrueArgument("writeRequests not empty", !writeRequests.isEmpty()); + this.clientSideOperationTimeout = notNull("clientSideOperationTimeout", clientSideOperationTimeout); this.writeRequests 
= notNull("writeRequests", writeRequests); this.type = type; this.ordered = ordered; @@ -100,8 +112,8 @@ LegacyMixedBulkWriteOperation bypassDocumentValidation(@Nullable final Boolean b @Override public WriteConcernResult execute(final WriteBinding binding) { try { - BulkWriteResult result = new MixedBulkWriteOperation(namespace, writeRequests, ordered, writeConcern, retryWrites) - .bypassDocumentValidation(bypassDocumentValidation).execute(binding); + BulkWriteResult result = new MixedBulkWriteOperation(clientSideOperationTimeout, namespace, writeRequests, + ordered, writeConcern, retryWrites).bypassDocumentValidation(bypassDocumentValidation).execute(binding); if (result.wasAcknowledged()) { return translateBulkWriteResult(result); } else { diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy index 3073b7968a8..2a7bd66d293 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy @@ -63,6 +63,7 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit import static Fixture.getMongoClient +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForDelete import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForUpdate @@ -270,9 +271,9 @@ class DBCollectionSpecification extends Specification { collection.getStats() then: - expect executor.getReadOperation(), isTheSameAs(new CommandReadOperation('myDatabase', - new BsonDocument('collStats', new BsonString('test')), - new BsonDocumentCodec())) + expect executor.getReadOperation(), isTheSameAs(new CommandReadOperation(CSOT_NO_TIMEOUT.get(), 'myDatabase', + new BsonDocument('collStats', new BsonString('test')), + 
new BsonDocumentCodec())) executor.getReadPreference() == collection.getReadPreference() } @@ -290,7 +291,8 @@ class DBCollectionSpecification extends Specification { collection.find().iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .retryReads(true)) @@ -299,7 +301,8 @@ class DBCollectionSpecification extends Specification { collection.find().iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .retryReads(true)) @@ -308,7 +311,8 @@ class DBCollectionSpecification extends Specification { collection.find(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)).iterator().hasNext() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .collation(collation) .retryReads(true)) @@ -330,7 +334,8 @@ class DBCollectionSpecification extends Specification { collection.findOne() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) .retryReads(true)) @@ -340,7 +345,8 @@ class DBCollectionSpecification 
extends Specification { collection.findOne() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) .retryReads(true)) @@ -350,7 +356,8 @@ class DBCollectionSpecification extends Specification { collection.findOne(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + collection.getObjectCodec()) .filter(new BsonDocument()) .limit(-1) .collation(collation) @@ -370,8 +377,8 @@ class DBCollectionSpecification extends Specification { collection.findAndRemove(query) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation(collection.getNamespace(), - WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument())) + expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), collection. 
+ getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument())) } def 'findAndModify should create the correct FindAndUpdateOperation'() { @@ -390,8 +397,8 @@ class DBCollectionSpecification extends Specification { collection.findAndModify(query, update) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(collection.getNamespace(), - WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonUpdate) + expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), + collection.getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonUpdate) .filter(new BsonDocument())) when: // With options @@ -399,10 +406,11 @@ class DBCollectionSpecification extends Specification { .arrayFilters(dbObjectArrayFilters).writeConcern(WriteConcern.W3)) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(collection.getNamespace(), WriteConcern.W3, - retryWrites, collection.getObjectCodec(), bsonUpdate) + expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), + collection.getNamespace(), WriteConcern.W3, retryWrites, collection.getObjectCodec(), bsonUpdate) .filter(new BsonDocument()) - .collation(collation).arrayFilters(bsonDocumentWrapperArrayFilters)) + .collation(collation) + .arrayFilters(bsonDocumentWrapperArrayFilters)) where: dbObjectArrayFilters << [null, [], [new BasicDBObject('i.b', 1)]] @@ -426,8 +434,8 @@ class DBCollectionSpecification extends Specification { collection.findAndModify(query, replace) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection.getNamespace(), - WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace) + expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), collection. 
+ getNamespace(), WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace) .filter(new BsonDocument())) when: // With options @@ -435,8 +443,8 @@ class DBCollectionSpecification extends Specification { .writeConcern(WriteConcern.W3)) then: - expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection.getNamespace(), WriteConcern.W3, - retryWrites, collection.getObjectCodec(), bsonReplace) + expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), + collection.getNamespace(), WriteConcern.W3, retryWrites, collection.getObjectCodec(), bsonReplace) .filter(new BsonDocument()) .collation(collation)) } @@ -451,7 +459,7 @@ class DBCollectionSpecification extends Specification { collection.count() then: - expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) when: // Inherits from DB @@ -460,7 +468,7 @@ class DBCollectionSpecification extends Specification { executor.getReadConcern() == ReadConcern.MAJORITY then: - expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -469,7 +477,7 @@ class DBCollectionSpecification extends Specification { collection.count(new BasicDBObject(), new DBCollectionCountOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace()) .filter(new BsonDocument()).retryReads(true) .collation(collation)) 
executor.getReadConcern() == ReadConcern.LOCAL @@ -496,8 +504,8 @@ class DBCollectionSpecification extends Specification { then: distinctFieldValues == [1, 2] - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()) - .filter(new BsonDocument()).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), 'field1', + new BsonValueCodec()).filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.DEFAULT when: // Inherits from DB @@ -505,7 +513,8 @@ class DBCollectionSpecification extends Specification { collection.distinct('field1') then: - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()) + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), 'field1', + new BsonValueCodec()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -514,8 +523,8 @@ class DBCollectionSpecification extends Specification { collection.distinct('field1', new DBCollectionDistinctOptions().collation(collation)) then: - expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1', new BsonValueCodec()) - .collation(collation).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), 'field1', + new BsonValueCodec()).collation(collation).retryReads(true)) executor.getReadConcern() == ReadConcern.LOCAL } @@ -534,8 +543,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - collection.getDefaultDBObjectCodec()) + new 
MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument())) executor.getReadConcern() == ReadConcern.DEFAULT @@ -546,8 +555,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - collection.getDefaultDBObjectCodec()) + new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument())) executor.getReadConcern() == ReadConcern.LOCAL @@ -561,8 +570,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getReadOperation(), isTheSameAs( - new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - collection.getDefaultDBObjectCodec()) + new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec()) .verbose(true) .filter(new BsonDocument()) .collation(collation)) @@ -581,8 +590,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - 'myColl', collection.getWriteConcern()) + new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) ) @@ -592,8 +601,8 @@ class DBCollectionSpecification extends 
Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - 'myColl', collection.getWriteConcern()) + new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) ) @@ -606,8 +615,8 @@ class DBCollectionSpecification extends Specification { then: expect executor.getWriteOperation(), isTheSameAs( - new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'), new BsonJavaScript('reduce'), - 'myColl', collection.getWriteConcern()) + new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), new BsonJavaScript('map'), + new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern()) .verbose(true) .filter(new BsonDocument()) .collation(collation) @@ -630,8 +639,8 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true)) executor.getReadConcern() == ReadConcern.DEFAULT when: // Inherits from DB @@ -639,8 +648,8 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new 
AggregateOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY when: @@ -648,8 +657,8 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true)) executor.getReadConcern() == ReadConcern.LOCAL } @@ -665,21 +674,21 @@ class DBCollectionSpecification extends Specification { collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), bsonPipeline, collection.getReadConcern(), collection.getWriteConcern())) when: // Inherits from DB collection.aggregate(pipeline, AggregationOptions.builder().build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), bsonPipeline, collection.getReadConcern(), collection.getWriteConcern())) when: collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build()) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(), + expect executor.getReadOperation(), isTheSameAs(new 
AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), bsonPipeline, collection.getReadConcern(), collection.getWriteConcern()).collation(collation)) } @@ -697,8 +706,8 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) when: // Inherits from DB @@ -706,8 +715,8 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) when: @@ -715,8 +724,8 @@ class DBCollectionSpecification extends Specification { collection.explainAggregate(pipeline, options) then: - expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(), bsonPipeline, - collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) + expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation) 
.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec())) } @@ -736,8 +745,8 @@ class DBCollectionSpecification extends Specification { collection.update(BasicDBObject.parse(query), BasicDBObject.parse(update)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, - WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + true, WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest))) when: // Inherits from DB db.setWriteConcern(WriteConcern.W3) @@ -745,8 +754,8 @@ class DBCollectionSpecification extends Specification { then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, - WriteConcern.W3, retryWrites, asList(updateRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + true, WriteConcern.W3, retryWrites, asList(updateRequest))) when: collection.setWriteConcern(WriteConcern.W1) @@ -755,8 +764,8 @@ class DBCollectionSpecification extends Specification { new DBCollectionUpdateOptions().collation(collation).arrayFilters(dbObjectArrayFilters)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(), true, - WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters)))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + true, WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters)))) where: dbObjectArrayFilters << [null, [], [new BasicDBObject('i.b', 1)]] @@ -778,16 +787,16 @@ class DBCollectionSpecification extends Specification { 
collection.remove(BasicDBObject.parse(query)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, - WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + false, WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest))) when: // Inherits from DB db.setWriteConcern(WriteConcern.W3) collection.remove(BasicDBObject.parse(query)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, - WriteConcern.W3, retryWrites, asList(deleteRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + false, WriteConcern.W3, retryWrites, asList(deleteRequest))) when: collection.setWriteConcern(WriteConcern.W1) @@ -795,8 +804,8 @@ class DBCollectionSpecification extends Specification { collection.remove(BasicDBObject.parse(query), new DBCollectionRemoveOptions().collation(collation)) then: - expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(), false, - WriteConcern.W1, retryWrites, asList(deleteRequest))) + expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + false, WriteConcern.W1, retryWrites, asList(deleteRequest))) } def 'should create the correct MixedBulkWriteOperation'() { @@ -827,7 +836,8 @@ class DBCollectionSpecification extends Specification { bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + writeRequests, ordered, 
WriteConcern.ACKNOWLEDGED, false)) when: // Inherits from DB @@ -835,16 +845,16 @@ class DBCollectionSpecification extends Specification { bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, - WriteConcern.W3, false)) + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + writeRequests, ordered, WriteConcern.W3, false)) when: collection.setWriteConcern(WriteConcern.W1) bulk().execute() then: - expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(), writeRequests, ordered, - WriteConcern.W1, false)) + expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + writeRequests, ordered, WriteConcern.W1, false)) where: ordered << [true, false, true] diff --git a/driver-legacy/src/test/functional/com/mongodb/DBTest.java b/driver-legacy/src/test/functional/com/mongodb/DBTest.java index 6f5c4df1c44..2bc774a796c 100644 --- a/driver-legacy/src/test/functional/com/mongodb/DBTest.java +++ b/driver-legacy/src/test/functional/com/mongodb/DBTest.java @@ -31,6 +31,7 @@ import java.util.Locale; import java.util.UUID; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; import static com.mongodb.ClusterFixture.getBinding; @@ -344,7 +345,7 @@ public void shouldApplyUuidRepresentationToCommandEncodingAndDecoding() { } BsonDocument getCollectionInfo(final String collectionName) { - return new ListCollectionsOperation<>(getDefaultDatabaseName(), new BsonDocumentCodec()) + return new ListCollectionsOperation<>(CSOT_NO_TIMEOUT.get(), getDefaultDatabaseName(), new BsonDocumentCodec()) .filter(new BsonDocument("name", new 
BsonString(collectionName))).execute(getBinding()).next().get(0); } } diff --git a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy index 227126b1160..b478803128b 100644 --- a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy +++ b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy @@ -28,6 +28,7 @@ import org.bson.codecs.DocumentCodec import org.bson.types.ObjectId import spock.lang.IgnoreIf +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.ClusterFixture.getBinding import static com.mongodb.ClusterFixture.getSingleConnectionBinding import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet @@ -46,7 +47,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should throw IllegalArgumentException for empty list of requests'() { when: - createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, true, []) + createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, true, []) then: thrown(IllegalArgumentException) @@ -56,7 +57,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec given: def inserts = [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))] - def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, inserts) + def operation = createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, inserts) when: def result = execute(operation) @@ -73,7 +74,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should insert a single document'() { given: def insert = new InsertRequest(new BsonDocument('_id', 
new BsonInt32(1))) - def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) + def operation = createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) when: execute(operation) @@ -85,7 +86,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should execute unacknowledged write'() { given: def binding = getSingleConnectionBinding() - def operation = createBulkWriteOperationForInsert(getNamespace(), true, UNACKNOWLEDGED, false, + def operation = createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), true, UNACKNOWLEDGED, false, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))]) @@ -107,7 +108,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2))), ] - def operation = createBulkWriteOperationForInsert(getNamespace(), false, ACKNOWLEDGED, false, documents) + def operation = createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), false, ACKNOWLEDGED, false, documents) when: execute(operation) @@ -124,7 +125,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2))), ] - def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, documents) + def operation = createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, documents) when: execute(operation) @@ -138,7 +139,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should support retryable writes'() { given: def insert = new InsertRequest(new 
BsonDocument('_id', new BsonInt32(1))) - def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, true, asList(insert)) + def operation = createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, true, asList(insert)) when: executeWithSession(operation, false) @@ -150,7 +151,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should remove a document'() { given: getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) - def operation = createBulkWriteOperationForDelete(getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForDelete(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1)))]) when: @@ -167,7 +168,8 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should return correct result for replace'() { given: def replacement = new UpdateRequest(new BsonDocument(), new BsonDocument('_id', new BsonInt32(1)), REPLACE) - def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) + def operation = createBulkWriteOperationForReplace(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) when: def result = execute(operation) @@ -182,11 +184,13 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should replace a single document'() { given: def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) - createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)).execute(getBinding()) + createBulkWriteOperationForInsert(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) + .execute(getBinding()) def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new 
BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) - def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) + def operation = createBulkWriteOperationForReplace(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) when: def result = execute(operation) @@ -205,7 +209,8 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) .upsert(true) - def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, false, asList(replacement)) + def operation = createBulkWriteOperationForReplace(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) when: execute(operation) @@ -216,9 +221,9 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should update nothing if no documents match'() { given: - def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, - asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), - new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) + def operation = createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, + false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) when: WriteConcernResult result = execute(operation) @@ -236,7 +241,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 1), new Document('x', 1)) - def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + def operation = 
createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) @@ -256,7 +261,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', 1), new Document('x', 1)) - def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(true))) @@ -273,7 +278,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'when upsert is true should insert a document if there are no matching documents'() { given: - def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).upsert(true))) @@ -291,7 +296,7 @@ class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpec def 'should return correct result for upsert'() { given: def id = new ObjectId() - def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + def operation = createBulkWriteOperationForUpdate(CSOT_NO_TIMEOUT.get(), getNamespace(), true, ACKNOWLEDGED, false, asList(new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), UPDATE).upsert(true))) diff --git 
a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy index 768dd52d7ed..b2b835861f9 100644 --- a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy @@ -28,7 +28,10 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME import static Fixture.getMongoClient +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME_AND_MAX_AWAIT_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static spock.util.matcher.HamcrestSupport.expect @@ -122,10 +125,11 @@ class DBCursorSpecification extends Specification { cursor.toArray() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) - .filter(new BsonDocument()) - .projection(new BsonDocument()) - .retryReads(true)) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), + collection.getObjectCodec()) + .filter(new BsonDocument()) + .projection(new BsonDocument()) + .retryReads(true)) } @@ -140,11 +144,13 @@ class DBCursorSpecification extends Specification { cursor.one() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) - .limit(-1) - .filter(new BsonDocument()) - .projection(new BsonDocument()) - .retryReads(true)) + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace(), collection.getObjectCodec()) + .limit(-1) + .filter(new BsonDocument()) + .projection(new BsonDocument()) + .retryReads(true) + ) } def 'DBCursor methods should be used to create the expected operation'() { @@ -167,7 +173,7 @@ class DBCursorSpecification extends 
Specification { .batchSize(1) .cursorType(cursorType) .limit(1) - .maxTime(1, TimeUnit.MILLISECONDS) + .maxTime(100, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .oplogReplay(true) .partial(true) @@ -178,13 +184,13 @@ class DBCursorSpecification extends Specification { cursor.toArray() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(CSOT_MAX_TIME.get(), collection.getNamespace(), collection.getObjectCodec()) .batchSize(1) .collation(collation) .cursorType(cursorType) .filter(bsonFilter) .limit(1) - .maxTime(1, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .oplogReplay(true) .partial(true) @@ -223,8 +229,8 @@ class DBCursorSpecification extends Specification { .collation(collation) .cursorType(cursorType) .limit(1) - .maxAwaitTime(1, TimeUnit.MILLISECONDS) - .maxTime(1, TimeUnit.MILLISECONDS) + .maxAwaitTime(1001, TimeUnit.MILLISECONDS) + .maxTime(101, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .oplogReplay(true) .partial(true) @@ -246,14 +252,13 @@ class DBCursorSpecification extends Specification { cursor.toArray() then: - expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_MAX_TIME_AND_MAX_AWAIT_TIME.get(), + collection.getNamespace(), collection.getObjectCodec()) .batchSize(1) .collation(collation) .cursorType(cursorType) .filter(bsonFilter) .limit(1) - .maxAwaitTime(1, TimeUnit.MILLISECONDS) - .maxTime(1, TimeUnit.MILLISECONDS) .noCursorTimeout(true) .oplogReplay(true) .partial(true) @@ -283,7 +288,7 @@ class DBCursorSpecification extends Specification { then: result == 42 - expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(CSOT_NO_TIMEOUT.get(), 
collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY } @@ -299,7 +304,7 @@ class DBCursorSpecification extends Specification { then: result == 42 - expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) + expect executor.getReadOperation(), isTheSameAs(new CountOperation(CSOT_NO_TIMEOUT.get(), collection.getNamespace()) .filter(new BsonDocument()).retryReads(true)) executor.getReadConcern() == ReadConcern.MAJORITY } diff --git a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy index b54daabc072..e506b0b804c 100644 --- a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy @@ -35,6 +35,7 @@ import org.bson.BsonDouble import spock.lang.Specification import static Fixture.getMongoClient +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry import static spock.util.matcher.HamcrestSupport.expect @@ -84,7 +85,7 @@ class DBSpecification extends Specification { then: def operation = executor.getWriteOperation() as CreateCollectionOperation - expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern())) + expect operation, isTheSameAs(new CreateCollectionOperation(CSOT_NO_TIMEOUT.get(), 'test', 'ctest', db.getWriteConcern())) executor.getReadConcern() == ReadConcern.MAJORITY when: @@ -104,7 +105,7 @@ class DBSpecification extends Specification { operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()) + expect operation, isTheSameAs(new CreateCollectionOperation(CSOT_NO_TIMEOUT.get(), 'test', 'ctest', db.getWriteConcern()) 
.sizeInBytes(100000) .maxDocuments(2000) .capped(true) @@ -132,7 +133,8 @@ class DBSpecification extends Specification { operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()).collation(collation)) + expect operation, isTheSameAs(new CreateCollectionOperation(CSOT_NO_TIMEOUT.get(), 'test', 'ctest', db.getWriteConcern()) + .collation(collation)) executor.getReadConcern() == ReadConcern.MAJORITY } @@ -160,7 +162,7 @@ class DBSpecification extends Specification { then: def operation = executor.getWriteOperation() as CreateViewOperation - expect operation, isTheSameAs(new CreateViewOperation(databaseName, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(CSOT_NO_TIMEOUT.get(), databaseName, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern)) executor.getReadConcern() == ReadConcern.MAJORITY @@ -169,7 +171,7 @@ class DBSpecification extends Specification { operation = executor.getWriteOperation() as CreateViewOperation then: - expect operation, isTheSameAs(new CreateViewOperation(databaseName, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(CSOT_NO_TIMEOUT.get(), databaseName, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern).collation(collation)) executor.getReadConcern() == ReadConcern.MAJORITY } @@ -189,7 +191,8 @@ class DBSpecification extends Specification { def operation = executor.getReadOperation() as ListCollectionsOperation then: - expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry())) + expect operation, isTheSameAs(new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, + new DBObjectCodec(getDefaultCodecRegistry())) .nameOnly(true)) when: @@ -197,7 +200,8 @@ class DBSpecification extends Specification { operation 
= executor.getReadOperation() as ListCollectionsOperation then: - expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, new DBObjectCodec(getDefaultCodecRegistry())) + expect operation, isTheSameAs(new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), databaseName, + new DBObjectCodec(getDefaultCodecRegistry())) .nameOnly(true)) } diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy index c20fbabfb58..0aee1c788b8 100644 --- a/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy @@ -339,7 +339,7 @@ class MongoClientSpecification extends Specification { then: expect database, isTheSameAs(new MongoDatabaseImpl('name', client.getCodecRegistry(), secondary(), - WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, STANDARD, null, + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, STANDARD, null, null, client.getOperationExecutor())) } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java index 635547ef7f7..f39a4558788 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java @@ -45,12 +45,14 @@ import com.mongodb.client.result.InsertManyResult; import com.mongodb.client.result.InsertOneResult; import com.mongodb.client.result.UpdateResult; +import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import org.reactivestreams.Publisher; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoCollection interface. 
@@ -107,6 +109,36 @@ public interface MongoCollection { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

If not null the following deprecated options will be ignored: + * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}

+ * + *
    + *
  • {@code null} means that the timeout mechanism for operations will defer to using: + *
      + *
    • {@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become + * available
    • + *
    • {@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.
    • + *
    • {@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.
    • + *
    • {@code maxTimeMS}: The cumulative time limit for processing operations on a cursor. + * See: cursor.maxTimeMS.
    • + *
    • {@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute. + * See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.
    • + *
    + *
  • + *
  • {@code 0} means infinite timeout.
  • + *
  • {@code > 0} The time limit to use for the full execution of an operation.
  • + *
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.. * @@ -156,6 +188,22 @@ public interface MongoCollection { */ MongoCollection withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + *
    + *
  • {@code 0} means infinite timeout.
  • + *
  • {@code > 0} The time limit to use for the full execution of an operation.
  • + *
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCollection instance with the set time limit for the full execution of an operation + * @since 4.x + * @see #getTimeout + */ + MongoCollection withTimeout(long timeout, TimeUnit timeUnit); + /** * Gets an estimate of the count of documents in a collection using collection metadata. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java index 9fb6c765108..440bde68956 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java @@ -22,12 +22,14 @@ import com.mongodb.annotations.ThreadSafe; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import org.reactivestreams.Publisher; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoDatabase interface. @@ -74,6 +76,36 @@ public interface MongoDatabase { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

+     * <p>If not null the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+ * + *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *    <ul>
+     *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *        available</li>
+     *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *        See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS/">cursor.maxTimeMS</a>.</li>
+     *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *        See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *    </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -114,6 +146,22 @@ public interface MongoDatabase { */ MongoDatabase withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * + *
+     * <ul>
+     *  <li>{@code 0} means infinite timeout.</li>
+     *  <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoDatabase instance with the set time limit for the full execution of an operation. + * @since 4.x + * @see #getTimeout + */ + MongoDatabase withTimeout(long timeout, TimeUnit timeUnit); + /** * Gets a collection. * diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java index 9594a9ad533..115f7090908 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java @@ -23,6 +23,7 @@ import com.mongodb.ReadConcern; import com.mongodb.TransactionOptions; import com.mongodb.WriteConcern; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.operation.AbortTransactionOperation; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; @@ -144,9 +145,11 @@ public Publisher commitTransaction() { commitInProgress = true; return executor.execute( - new CommitTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()), alreadyCommitted) - .recoveryToken(getRecoveryToken()) - .maxCommitTime(transactionOptions.getMaxCommitTime(MILLISECONDS), MILLISECONDS), + new CommitTransactionOperation( + // TODO (CSOT) - JAVA-4067 + ClientSideOperationTimeouts.withMaxCommitMS(null, transactionOptions.getMaxCommitTime(MILLISECONDS)), + assertNotNull(transactionOptions.getWriteConcern()), alreadyCommitted) + .recoveryToken(getRecoveryToken()), readConcern, this) .doOnTerminate(() -> { commitInProgress = false; @@ -176,7 +179,10 @@ public Publisher abortTransaction() { throw new MongoInternalException("Invariant 
violated. Transaction options read concern can not be null"); } return executor.execute( - new AbortTransactionOperation(assertNotNull(transactionOptions.getWriteConcern())) + new AbortTransactionOperation( + // TODO (CSOT) - JAVA-4067 + ClientSideOperationTimeouts.withMaxCommitMS(null, transactionOptions.getMaxCommitTime(MILLISECONDS)), + assertNotNull(transactionOptions.getWriteConcern())) .recoveryToken(getRecoveryToken()), readConcern, this) .onErrorResume(Throwable.class, (e) -> Mono.empty()) diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java index ff8c0692d0b..73de206acd0 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java @@ -103,6 +103,7 @@ private MongoClientImpl(final MongoClientSettings settings, final MongoDriverInf settings.getRetryWrites(), settings.getRetryReads(), settings.getUuidRepresentation(), settings.getAutoEncryptionSettings(), + null, // TODO JAVA-4064 this.executor); this.closed = new AtomicBoolean(); BsonDocument clientMetadataDocument = createClientMetadataDocument(settings.getApplicationName(), mongoDriverInformation); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java index 953b45ac9ac..f61142d64ca 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java @@ -62,6 +62,7 @@ import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static 
com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.notNull; @@ -105,6 +106,12 @@ public ReadConcern getReadConcern() { return mongoOperationPublisher.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutMS(); + return (timeoutMS != null) ? notNull("timeUnit", timeUnit).convert(timeoutMS, TimeUnit.MILLISECONDS) : null; + } + MongoOperationPublisher getPublisherHelper() { return mongoOperationPublisher; } @@ -134,6 +141,11 @@ public MongoCollection withReadConcern(final ReadConcern readConcern) { return new MongoCollectionImpl<>(mongoOperationPublisher.withReadConcern(readConcern)); } + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + @Override public Publisher estimatedDocumentCount() { return estimatedDocumentCount(new EstimatedDocumentCountOptions()); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java index 48597289103..0105d392449 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java @@ -38,6 +38,7 @@ import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.MongoNamespace.checkDatabaseNameValidity; import static com.mongodb.assertions.Assertions.assertNotNull; @@ -82,6 +83,12 @@ public ReadConcern getReadConcern() { return mongoOperationPublisher.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutMS(); + return (timeoutMS != 
null) ? notNull("timeUnit", timeUnit).convert(timeoutMS, TimeUnit.MILLISECONDS) : null; + } + MongoOperationPublisher getMongoOperationPublisher() { return mongoOperationPublisher; } @@ -106,6 +113,11 @@ public MongoDatabase withReadConcern(final ReadConcern readConcern) { return new MongoDatabaseImpl(mongoOperationPublisher.withReadConcern(readConcern)); } + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoDatabaseImpl(mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + @Override public MongoCollection getCollection(final String collectionName) { return getCollection(collectionName, Document.class); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java index 966dcc8c64f..b74afe27eee 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java @@ -74,6 +74,8 @@ import java.util.HashMap; import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; @@ -95,22 +97,22 @@ public final class MongoOperationPublisher { final Class documentClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, final UuidRepresentation uuidRepresentation, @Nullable final AutoEncryptionSettings autoEncryptionSettings, - final OperationExecutor executor) { + @Nullable final Long timeoutMS, final OperationExecutor executor) { this(new MongoNamespace("_ignored", "_ignored"), documentClass, codecRegistry, readPreference, readConcern, writeConcern, 
retryWrites, retryReads, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } MongoOperationPublisher( final MongoNamespace namespace, final Class documentClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, final UuidRepresentation uuidRepresentation, - @Nullable final AutoEncryptionSettings autoEncryptionSettings, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, @Nullable final Long timeoutMS, final OperationExecutor executor) { this.operations = new AsyncOperations<>(namespace, notNull("documentClass", documentClass), notNull("readPreference", readPreference), notNull("codecRegistry", codecRegistry), notNull("readConcern", readConcern), notNull("writeConcern", writeConcern), - retryWrites, retryReads); + retryWrites, retryReads, timeoutMS); this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); this.autoEncryptionSettings = autoEncryptionSettings; this.executor = notNull("executor", executor); @@ -144,6 +146,11 @@ public boolean getRetryReads() { return operations.isRetryReads(); } + @Nullable + public Long getTimeoutMS() { + return operations.getTimeoutMS(); + } + Class getDocumentClass() { return operations.getDocumentClass(); } @@ -175,15 +182,15 @@ MongoOperationPublisher withNamespaceAndDocumentClass(final MongoNamespac return (MongoOperationPublisher) this; } return new MongoOperationPublisher<>(notNull("namespace", namespace), notNull("documentClass", documentClass), - getCodecRegistry(), getReadPreference(), getReadConcern(), getWriteConcern(), - getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, executor); + getCodecRegistry(), getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, 
getTimeoutMS(), executor); } MongoOperationPublisher withCodecRegistry(final CodecRegistry codecRegistry) { return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), - withUuidRepresentation(notNull("codecRegistry", codecRegistry), uuidRepresentation), - getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), - uuidRepresentation, autoEncryptionSettings, executor); + withUuidRepresentation(notNull("codecRegistry", codecRegistry), uuidRepresentation), + getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutMS(), executor); } MongoOperationPublisher withReadPreference(final ReadPreference readPreference) { @@ -191,9 +198,8 @@ MongoOperationPublisher withReadPreference(final ReadPreference readPreferenc return this; } return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(), - notNull("readPreference", readPreference), - getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), - uuidRepresentation, autoEncryptionSettings, executor); + notNull("readPreference", readPreference), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutMS(), executor); } MongoOperationPublisher withWriteConcern(final WriteConcern writeConcern) { @@ -201,8 +207,8 @@ MongoOperationPublisher withWriteConcern(final WriteConcern writeConcern) { return this; } return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(), getReadPreference(), getReadConcern(), - notNull("writeConcern", writeConcern), - getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, executor); + notNull("writeConcern", writeConcern), getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, + getTimeoutMS(), executor); } MongoOperationPublisher withReadConcern(final ReadConcern 
readConcern) { @@ -210,9 +216,20 @@ MongoOperationPublisher withReadConcern(final ReadConcern readConcern) { return this; } return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), - getCodecRegistry(), getReadPreference(), notNull("readConcern", readConcern), - getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, - autoEncryptionSettings, executor); + getCodecRegistry(), getReadPreference(), notNull("readConcern", readConcern), + getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, + autoEncryptionSettings, getTimeoutMS(), executor); + } + + MongoOperationPublisher withTimeout(final long timeout, final TimeUnit timeUnit) { + long timeoutMS = notNull("timeUnit", timeUnit).toMillis(timeout); + if (Objects.equals(getTimeoutMS(), timeoutMS)) { + return this; + } + return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), + getCodecRegistry(), getReadPreference(), getReadConcern(), + getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, + autoEncryptionSettings, timeoutMS, executor); } Publisher dropDatabase(@Nullable final ClientSession clientSession) { diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java index 64d94984b2e..922e07cc2d5 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java @@ -59,6 +59,7 @@ import reactor.core.publisher.Mono; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; @@ -102,6 +103,11 @@ public ReadConcern 
getReadConcern() { return wrapped.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + @Override public MongoCollection withDocumentClass(final Class clazz) { return new SyncMongoCollection<>(wrapped.withDocumentClass(clazz)); @@ -127,6 +133,11 @@ public MongoCollection withReadConcern(final ReadConcern readConcern) { return new SyncMongoCollection<>(wrapped.withReadConcern(readConcern)); } + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncMongoCollection<>(wrapped.withTimeout(timeout, timeUnit)); + } + @Override public long countDocuments() { return requireNonNull(Mono.from(wrapped.countDocuments()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java index 3dc38b063a4..90843d526f3 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java @@ -35,6 +35,7 @@ import reactor.core.publisher.Mono; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; @@ -76,6 +77,11 @@ public ReadConcern getReadConcern() { return wrapped.getReadConcern(); } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + @Override public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) { return new SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)); @@ -96,6 +102,11 @@ public MongoDatabase withReadConcern(final 
ReadConcern readConcern) { return new SyncMongoDatabase(wrapped.withReadConcern(readConcern)); } + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)); + } + @Override public MongoCollection getCollection(final String collectionName) { return new SyncMongoCollection<>(wrapped.getCollection(collectionName)); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java index 17fb4479e8c..c7b14209ce3 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java @@ -38,11 +38,14 @@ import java.util.List; +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME; +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME_AND_MAX_AWAIT_TIME; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.lang.String.format; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -58,8 +61,8 @@ void shouldBuildTheExpectedOperation() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, - getDefaultCodecRegistry().get(Document.class)) + AggregateOperation 
expectedOperation = new AggregateOperation<>(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, + getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -77,17 +80,17 @@ void shouldBuildTheExpectedOperation() { .collation(COLLATION) .comment("my comment") .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) - .maxTime(10, SECONDS); + .maxAwaitTime(1001, MILLISECONDS) + .maxTime(101, MILLISECONDS); - expectedOperation + expectedOperation = new AggregateOperation<>(CSOT_MAX_TIME_AND_MAX_AWAIT_TIME.get(), NAMESPACE, pipeline, + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true) .allowDiskUse(true) .batchSize(100) .collation(COLLATION) .comment(new BsonString("my comment")) - .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) - .maxTime(10, SECONDS); + .hint(BsonDocument.parse("{a: 1}")); Flux.from(publisher).blockFirst(); assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); @@ -103,7 +106,7 @@ void shouldBuildTheExpectedOperationForHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, + AggregateOperation expectedOperation = new AggregateOperation<>(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -127,7 +130,7 @@ void shouldBuildTheExpectedOperationForHintPlusHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateOperation expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline, + AggregateOperation expectedOperation = new AggregateOperation<>(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, getDefaultCodecRegistry().get(Document.class)) 
.batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -155,9 +158,8 @@ void shouldBuildTheExpectedOperationsForDollarOut() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher).blockFirst(); @@ -174,16 +176,16 @@ void shouldBuildTheExpectedOperationsForDollarOut() { .collation(COLLATION) .comment("my comment") .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) // Ignored on $out - .maxTime(10, SECONDS); + .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out + .maxTime(101, MILLISECONDS); - expectedOperation + expectedOperation = new AggregateToCollectionOperation(CSOT_MAX_TIME_AND_MAX_AWAIT_TIME.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED) .allowDiskUse(true) .bypassDocumentValidation(true) .collation(COLLATION) .comment(new BsonString("my comment")) - .hint(BsonDocument.parse("{a: 1}")) - .maxTime(10, SECONDS); + .hint(BsonDocument.parse("{a: 1}")); Flux.from(publisher).blockFirst(); assertEquals(ReadPreference.primary(), executor.getReadPreference()); @@ -191,12 +193,10 @@ void shouldBuildTheExpectedOperationsForDollarOut() { assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation()); FindOperation expectedFindOperation = - new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class)) + new FindOperation<>(CSOT_MAX_TIME.get(), collectionNamespace, getDefaultCodecRegistry().get(Document.class)) .batchSize(100) .collation(COLLATION) .filter(new BsonDocument()) - .maxAwaitTime(0, SECONDS) 
- .maxTime(0, SECONDS) .comment(new BsonString("my comment")) .retryReads(true); @@ -205,7 +205,8 @@ void shouldBuildTheExpectedOperationsForDollarOut() { // Should handle database level aggregations publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); Flux.from(publisher).blockFirst(); operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation(); @@ -215,7 +216,8 @@ void shouldBuildTheExpectedOperationsForDollarOut() { // Should handle toCollection publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher.toCollection()).blockFirst(); @@ -234,9 +236,8 @@ void shouldBuildTheExpectedOperationsForDollarOutWithHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); publisher .hintString("x_1"); @@ -262,9 +263,8 @@ void 
shouldBuildTheExpectedOperationsForDollarOutWithHintPlusHintString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); publisher .hint(new Document("x", 1)) @@ -296,8 +296,8 @@ void shouldBuildTheExpectedOperationsForDollarOutAsDocument() { new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION) .toCollection(); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); Flux.from(toCollectionPublisher).blockFirst(); assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); @@ -317,8 +317,8 @@ void shouldBuildTheExpectedOperationsForDollarOutAsDocument() { AggregationLevel.COLLECTION) .toCollection(); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipelineWithNamespace, ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipelineWithNamespace, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); Flux.from(toCollectionPublisher).blockFirst(); assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); @@ -336,9 +336,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, 
createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher).blockFirst(); @@ -355,16 +354,16 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { .collation(COLLATION) .comment(new BsonInt32(1)) .hint(BsonDocument.parse("{a: 1}")) - .maxAwaitTime(20, SECONDS) // Ignored on $out - .maxTime(10, SECONDS); + .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out + .maxTime(101, MILLISECONDS); - expectedOperation + expectedOperation = new AggregateToCollectionOperation(CSOT_MAX_TIME_AND_MAX_AWAIT_TIME.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED) .allowDiskUse(true) .bypassDocumentValidation(true) .collation(COLLATION) .comment(new BsonInt32(1)) - .hint(BsonDocument.parse("{a: 1}")) - .maxTime(10, SECONDS); + .hint(BsonDocument.parse("{a: 1}")); Flux.from(publisher).blockFirst(); assertEquals(ReadPreference.primary(), executor.getReadPreference()); @@ -372,12 +371,10 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation()); FindOperation expectedFindOperation = - new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class)) + new FindOperation<>(CSOT_NO_TIMEOUT.get(), collectionNamespace, getDefaultCodecRegistry().get(Document.class)) .batchSize(100) .collation(COLLATION) .filter(new BsonDocument()) - .maxAwaitTime(0, SECONDS) - .maxTime(0, SECONDS) .comment(new BsonInt32(1)) .retryReads(true); @@ -386,7 +383,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { // Should handle 
database level aggregations publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); Flux.from(publisher).blockFirst(); operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation(); @@ -396,7 +394,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeDocument() { // Should handle toCollection publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); + expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher.toCollection()).blockFirst(); @@ -415,9 +414,8 @@ void shouldBuildTheExpectedOperationsForDollarMergeString() { AggregatePublisher publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION); - AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, - ReadConcern.DEFAULT, - WriteConcern.ACKNOWLEDGED); + AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, pipeline, + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED); // default input should be as expected Flux.from(publisher).blockFirst(); @@ -427,7 +425,7 @@ void shouldBuildTheExpectedOperationsForDollarMergeString() { assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation()); FindOperation 
expectedFindOperation = - new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class)) + new FindOperation<>(CSOT_NO_TIMEOUT.get(), collectionNamespace, getDefaultCodecRegistry().get(Document.class)) .filter(new BsonDocument()) .batchSize(Integer.MAX_VALUE) .retryReads(true); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java index d8a0083173c..096ff1866bf 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java @@ -37,10 +37,12 @@ import java.util.List; +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME; +import static com.mongodb.ClusterFixture.CSOT_TIMEOUT; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -57,7 +59,8 @@ void shouldBuildTheExpectedOperation() { Document.class, pipeline, ChangeStreamLevel.COLLECTION); ChangeStreamOperation> expectedOperation = - new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, codec) + new ChangeStreamOperation<>(CSOT_TIMEOUT.get(), NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, + codec) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -72,16 +75,16 @@ void shouldBuildTheExpectedOperation() { .batchSize(100) .collation(COLLATION) .comment("comment") - 
.maxAwaitTime(20, SECONDS) + .maxAwaitTime(101, MILLISECONDS) .fullDocument(FullDocument.UPDATE_LOOKUP); - expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, - codec).retryReads(true); + expectedOperation = new ChangeStreamOperation<>(CSOT_MAX_TIME.get(), NAMESPACE, FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, + pipeline, + codec).retryReads(true); expectedOperation .batchSize(100) .collation(COLLATION) - .comment(new BsonString("comment")) - .maxAwaitTime(20, SECONDS); + .comment(new BsonString("comment")); Flux.from(publisher).blockFirst(); assertEquals(ReadPreference.primary(), executor.getReadPreference()); @@ -102,8 +105,8 @@ void shouldBuildTheExpectedOperationWhenSettingDocumentClass() { .withDocumentClass(BsonDocument.class); ChangeStreamOperation expectedOperation = - new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, - getDefaultCodecRegistry().get(BsonDocument.class)) + new ChangeStreamOperation<>(CSOT_TIMEOUT.get(), NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline, + getDefaultCodecRegistry().get(BsonDocument.class)) .batchSize(batchSize) .comment(new BsonInt32(1)) .retryReads(true); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java index f9de792574c..ec39e1fafee 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/DistinctPublisherImplTest.java @@ -28,6 +28,7 @@ import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static 
com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -43,7 +44,7 @@ void shouldBuildTheExpectedOperation() { DistinctPublisher publisher = new DistinctPublisherImpl<>(null, createMongoOperationPublisher(executor), fieldName, new Document()); - DistinctOperation expectedOperation = new DistinctOperation<>(NAMESPACE, fieldName, + DistinctOperation expectedOperation = new DistinctOperation<>(CSOT_NO_TIMEOUT.get(), NAMESPACE, fieldName, getDefaultCodecRegistry().get(Document.class)) .retryReads(true).filter(new BsonDocument()); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java index 0b297c13a87..f3c66f042b4 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java @@ -31,10 +31,11 @@ import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME_AND_MAX_AWAIT_TIME; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; @SuppressWarnings("deprecation") @@ -51,7 +52,8 @@ void shouldBuildTheExpectedOperation() { TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); FindPublisher publisher = new FindPublisherImpl<>(null, createMongoOperationPublisher(executor), new Document()); - 
FindOperation expectedOperation = new FindOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + FindOperation expectedOperation = new FindOperation<>(CSOT_NO_TIMEOUT.get(), NAMESPACE, + getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true) .filter(new BsonDocument()); @@ -67,8 +69,8 @@ void shouldBuildTheExpectedOperation() { .filter(new Document("filter", 1)) .sort(Sorts.ascending("sort")) .projection(new Document("projection", 1)) - .maxTime(10, SECONDS) - .maxAwaitTime(20, SECONDS) + .maxTime(101, MILLISECONDS) + .maxAwaitTime(1001, MILLISECONDS) .batchSize(100) .limit(100) .skip(10) @@ -85,7 +87,10 @@ void shouldBuildTheExpectedOperation() { .showRecordId(false) .allowDiskUse(false); - expectedOperation + expectedOperation = new FindOperation<>(CSOT_MAX_TIME_AND_MAX_AWAIT_TIME.get(), NAMESPACE, + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true) + .filter(new BsonDocument()) .allowDiskUse(false) .batchSize(100) .collation(COLLATION) @@ -95,8 +100,6 @@ void shouldBuildTheExpectedOperation() { .hint(new BsonString("a_1")) .limit(100) .max(new BsonDocument("max", new BsonInt32(1))) - .maxAwaitTime(20000, MILLISECONDS) - .maxTime(10000, MILLISECONDS) .min(new BsonDocument("min", new BsonInt32(1))) .projection(new BsonDocument("projection", new BsonInt32(1))) .returnKey(false) diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java index c875ab7973c..5ee75677655 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java @@ -26,9 +26,11 @@ import org.junit.jupiter.api.Test; import 
reactor.core.publisher.Flux; +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; public class ListCollectionsPublisherImplTest extends TestHelper { @@ -42,7 +44,7 @@ void shouldBuildTheExpectedOperation() { ListCollectionsPublisher publisher = new ListCollectionsPublisherImpl<>(null, createMongoOperationPublisher(executor) .withDocumentClass(String.class), true); - ListCollectionsOperation expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME, + ListCollectionsOperation expectedOperation = new ListCollectionsOperation<>(CSOT_NO_TIMEOUT.get(), DATABASE_NAME, getDefaultCodecRegistry().get(String.class)) .batchSize(Integer.MAX_VALUE) .nameOnly(true).retryReads(true); @@ -56,12 +58,14 @@ void shouldBuildTheExpectedOperation() { // Should apply settings publisher .filter(new Document("filter", 1)) - .maxTime(10, SECONDS) + .maxTime(100, MILLISECONDS) .batchSize(100); - expectedOperation + expectedOperation = new ListCollectionsOperation<>(CSOT_MAX_TIME.get(), DATABASE_NAME, + getDefaultCodecRegistry().get(String.class)) + .nameOnly(true) + .retryReads(true) .filter(new BsonDocument("filter", new BsonInt32(1))) - .maxTime(10, SECONDS) .batchSize(100); Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java index 749f11b8e0a..6412093f80d 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java +++ 
b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java @@ -26,9 +26,10 @@ import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; public class ListDatabasesPublisherImplTest extends TestHelper { @@ -41,7 +42,8 @@ void shouldBuildTheExpectedOperation() { TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); ListDatabasesPublisher publisher = new ListDatabasesPublisherImpl<>(null, createMongoOperationPublisher(executor)); - ListDatabasesOperation expectedOperation = new ListDatabasesOperation<>(getDefaultCodecRegistry().get(Document.class)) + ListDatabasesOperation expectedOperation = new ListDatabasesOperation<>(CSOT_MAX_TIME.get(), + getDefaultCodecRegistry().get(Document.class)) .retryReads(true); // default input should be as expected @@ -54,13 +56,12 @@ void shouldBuildTheExpectedOperation() { publisher .authorizedDatabasesOnly(true) .filter(new Document("filter", 1)) - .maxTime(10, SECONDS) + .maxTime(100, MILLISECONDS) .batchSize(100); expectedOperation .authorizedDatabasesOnly(true) - .filter(new BsonDocument("filter", new BsonInt32(1))) - .maxTime(10, SECONDS); + .filter(new BsonDocument("filter", new BsonInt32(1))); configureBatchCursor(); Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java index 1929c4c3476..492843a748f 100644 --- 
a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java @@ -25,9 +25,10 @@ import org.junit.jupiter.api.Test; import reactor.core.publisher.Flux; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; public class ListIndexesPublisherImplTest extends TestHelper { @@ -43,7 +44,7 @@ void shouldBuildTheExpectedOperation() { ListIndexesPublisher publisher = new ListIndexesPublisherImpl<>(null, createMongoOperationPublisher(executor)); ListIndexesOperation expectedOperation = - new ListIndexesOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + new ListIndexesOperation<>(CSOT_NO_TIMEOUT.get(), NAMESPACE, getDefaultCodecRegistry().get(Document.class)) .batchSize(Integer.MAX_VALUE) .retryReads(true); @@ -54,13 +55,13 @@ void shouldBuildTheExpectedOperation() { assertEquals(ReadPreference.primary(), executor.getReadPreference()); // Should apply settings - publisher - .batchSize(100) - .maxTime(10, SECONDS); + publisher.batchSize(100) + .maxTime(100, MILLISECONDS); - expectedOperation - .batchSize(100) - .maxTime(10, SECONDS); + expectedOperation = + new ListIndexesOperation<>(CSOT_NO_TIMEOUT.get(), NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + .batchSize(100) + .retryReads(true); configureBatchCursor(); Flux.from(publisher).blockFirst(); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java 
b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java index 451772e5751..54ba064ad88 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java @@ -34,8 +34,11 @@ import org.reactivestreams.Publisher; import reactor.core.publisher.Flux; +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME; +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT; import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -57,9 +60,9 @@ void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() { com.mongodb.reactivestreams.client.MapReducePublisher publisher = new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION); - MapReduceWithInlineResultsOperation expectedOperation = - new MapReduceWithInlineResultsOperation<>(NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), - getDefaultCodecRegistry().get(Document.class)).verbose(true); + MapReduceWithInlineResultsOperation expectedOperation = new MapReduceWithInlineResultsOperation<>( + CSOT_NO_TIMEOUT.get(), NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + getDefaultCodecRegistry().get(Document.class)).verbose(true); // default input should be as expected Flux.from(publisher).blockFirst(); @@ -78,19 +81,19 @@ void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() { .filter(new Document("filter", 1)) .finalizeFunction(FINALIZE_FUNCTION) .limit(999) - 
.maxTime(10, SECONDS) + .maxTime(100, SECONDS) .scope(new Document("scope", 1)) .sort(Sorts.ascending("sort")) .verbose(false); - expectedOperation - .collation(COLLATION) + expectedOperation = new MapReduceWithInlineResultsOperation<>( + CSOT_MAX_TIME.get(), NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + getDefaultCodecRegistry().get(Document.class)) + .verbose(true) .collation(COLLATION) .filter(BsonDocument.parse("{filter: 1}")) .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION)) .limit(999) - .maxTime(10, SECONDS) - .maxTime(10, SECONDS) .scope(new BsonDocument("scope", new BsonInt32(1))) .sort(new BsonDocument("sort", new BsonInt32(1))) .verbose(false); @@ -113,11 +116,9 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() { new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION) .collectionName(NAMESPACE.getCollectionName()); - MapReduceToCollectionOperation expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, - new BsonJavaScript(MAP_FUNCTION), - new BsonJavaScript(REDUCE_FUNCTION), - NAMESPACE.getCollectionName(), - WriteConcern.ACKNOWLEDGED).verbose(true); + MapReduceToCollectionOperation expectedOperation = new MapReduceToCollectionOperation(CSOT_NO_TIMEOUT.get(), NAMESPACE, + new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), + WriteConcern.ACKNOWLEDGED).verbose(true); // default input should be as expected Flux.from(publisher.toCollection()).blockFirst(); @@ -131,19 +132,19 @@ void shouldBuildTheExpectedMapReduceToCollectionOperation() { .filter(new Document("filter", 1)) .finalizeFunction(FINALIZE_FUNCTION) .limit(999) - .maxTime(10, SECONDS) + .maxTime(100, MILLISECONDS) .scope(new Document("scope", 1)) .sort(Sorts.ascending("sort")) .verbose(false); - expectedOperation + expectedOperation = new MapReduceToCollectionOperation(CSOT_MAX_TIME.get(), NAMESPACE, new 
BsonJavaScript(MAP_FUNCTION), + new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED) + .verbose(true) .collation(COLLATION) .bypassDocumentValidation(true) .filter(BsonDocument.parse("{filter: 1}")) .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION)) .limit(999) - .maxTime(10, SECONDS) - .maxTime(10, SECONDS) .scope(new BsonDocument("scope", new BsonInt32(1))) .sort(new BsonDocument("sort", new BsonInt32(1))) .verbose(false); diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java new file mode 100644 index 00000000000..67cd0761439 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.reactivestreams.client.internal; + + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import org.bson.BsonDocument; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.TimeUnit; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + + +public class MongoOperationPublisherTest { + private static final OperationExecutor OPERATION_EXECUTOR = mock(OperationExecutor.class); + private static final MongoNamespace MONGO_NAMESPACE = new MongoNamespace("a.b"); + + private static final MongoOperationPublisher DEFAULT_MOP = new MongoOperationPublisher<>( + MONGO_NAMESPACE, Document.class, MongoClientSettings.getDefaultCodecRegistry(), ReadPreference.primary(), + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, true, UuidRepresentation.STANDARD, + null, 100L, OPERATION_EXECUTOR); + + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = DEFAULT_MOP.withCodecRegistry(CodecRegistries.fromCodecs(new MyLongCodec())).getCodecRegistry(); + assertTrue(codecRegistry.get(Long.class) instanceof MyLongCodec); + } + + @Test + public void withDatabase() { + assertEquals(new MongoNamespace("c.ignored"), DEFAULT_MOP.withDatabase("c").getNamespace()); + } + + @Test + public void withDocumentClass() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withDocumentClass(Document.class)); + 
assertEquals(BsonDocument.class, DEFAULT_MOP.withDocumentClass(BsonDocument.class).getDocumentClass()); + } + + @Test + public void withDatabaseAndDocumentClass() { + MongoOperationPublisher alternative = DEFAULT_MOP.withDatabaseAndDocumentClass("c", BsonDocument.class); + assertEquals(BsonDocument.class, alternative.getDocumentClass()); + assertEquals(new MongoNamespace("c.ignored"), alternative.getNamespace()); + } + + @Test + public void withNamespaceAndDocumentClass() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class)); + + MongoOperationPublisher alternative = DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("c.d"), + BsonDocument.class); + assertEquals(BsonDocument.class, alternative.getDocumentClass()); + assertEquals(new MongoNamespace("c.d"), alternative.getNamespace()); + } + + + @Test + public void withNamespace() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class)); + assertEquals(new MongoNamespace("c.d"), DEFAULT_MOP.withNamespace(new MongoNamespace("c.d")).getNamespace()); + } + + @Test + public void withReadConcern() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadConcern(ReadConcern.DEFAULT)); + assertEquals(ReadConcern.AVAILABLE, DEFAULT_MOP.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadPreference(ReadPreference.primary())); + assertEquals(ReadPreference.secondaryPreferred(), DEFAULT_MOP.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withTimeout(100, TimeUnit.MILLISECONDS)); + assertEquals(1000, DEFAULT_MOP.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeoutMS()); + } + + @Test + public void withWriteConcern() { + assertEquals(DEFAULT_MOP, 
DEFAULT_MOP.withWriteConcern(WriteConcern.ACKNOWLEDGED)); + assertEquals(WriteConcern.MAJORITY, DEFAULT_MOP.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + + private static class MyLongCodec implements Codec { + + @Override + public Long decode(final BsonReader reader, final DecoderContext decoderContext) { + return 42L; + } + + @Override + public void encode(final BsonWriter writer, final Long value, final EncoderContext encoderContext) { + } + + @Override + public Class getEncoderClass() { + return Long.class; + } + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java index b5f77d39941..c97449cc23a 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java @@ -96,7 +96,7 @@ static MongoOperationPublisher createMongoOperationPublisher(final Ope return new MongoOperationPublisher<>(NAMESPACE, Document.class, getDefaultCodecRegistry(), ReadPreference.primary(), ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, true, - UuidRepresentation.STANDARD, null, executor); + UuidRepresentation.STANDARD, null, null, executor); } diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala index 380c6d272f3..7d97d794c42 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala @@ -34,7 +34,9 @@ import org.mongodb.scala.bson.DefaultHelper.DefaultsTo import org.mongodb.scala.result.{ InsertManyResult, InsertOneResult } import java.util +import 
java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS } import scala.reflect.ClassTag case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCollection[T] { @@ -53,6 +55,13 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol override def getReadConcern: ReadConcern = wrapped.readConcern + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + wrapped.timeout match { + case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS) + case None => null + } + } + override def withDocumentClass[NewTDocument](clazz: Class[NewTDocument]): JMongoCollection[NewTDocument] = SyncMongoCollection[NewTDocument]( wrapped.withDocumentClass[NewTDocument]()( @@ -73,6 +82,9 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol override def withReadConcern(readConcern: ReadConcern): JMongoCollection[T] = SyncMongoCollection[T](wrapped.withReadConcern(readConcern)) + override def withTimeout(timeout: Long, timeUnit: TimeUnit): JMongoCollection[T] = + SyncMongoCollection[T](wrapped.withTimeout(Duration(timeout, timeUnit))) + override def countDocuments: Long = wrapped.countDocuments().toFuture().get() override def countDocuments(filter: Bson): Long = wrapped.countDocuments(filter).toFuture().get() @@ -556,7 +568,7 @@ case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCol override def createSearchIndex(definition: Bson): String = wrapped.createSearchIndex(definition).toFuture().get() - override def createSearchIndexes(searchIndexModels: util.List[SearchIndexModel]): util.List[String] = + override def createSearchIndexes(searchIndexModels: java.util.List[SearchIndexModel]): java.util.List[String] = wrapped.createSearchIndexes(searchIndexModels.asScala.toList).toFuture().get().asJava override def updateSearchIndex(indexName: String, definition: Bson): Unit = diff --git 
a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala index 036d5589957..798710d7bac 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala @@ -24,7 +24,10 @@ import org.bson.conversions.Bson import org.mongodb.scala.MongoDatabase import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import java.lang +import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ +import scala.concurrent.duration.MILLISECONDS import scala.reflect.ClassTag case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase { @@ -39,6 +42,13 @@ case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase { override def getReadConcern: ReadConcern = wrapped.readConcern + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + wrapped.timeout match { + case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS) + case None => null + } + } + override def withCodecRegistry(codecRegistry: CodecRegistry) = SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) @@ -48,6 +58,8 @@ case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase { override def withReadConcern(readConcern: ReadConcern) = throw new UnsupportedOperationException + override def withTimeout(timeout: Long, timeUnit: TimeUnit) = throw new UnsupportedOperationException + override def getCollection(collectionName: String) = SyncMongoCollection[Document](wrapped.getCollection(collectionName)) diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala index b7afbd613e5..d8b5816b84e 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala +++ 
b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala @@ -27,6 +27,7 @@ import org.mongodb.scala.model._ import org.mongodb.scala.result._ import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS, TimeUnit } import scala.reflect.ClassTag // scalastyle:off number.of.methods file.size.limit @@ -83,6 +84,28 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul */ lazy val readConcern: ReadConcern = wrapped.getReadConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 4.x + */ + lazy val timeout: Option[Duration] = + Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS)) + /** * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.. 
* @@ -136,6 +159,19 @@ case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResul def withReadConcern(readConcern: ReadConcern): MongoCollection[TResult] = MongoCollection(wrapped.withReadConcern(readConcern)) + /** + * Sets the time limit for the full execution of an operation. + * + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new MongoCollection instance with the set time limit for operations + * @since 4.x + */ + def withTimeout(timeout: Duration): MongoCollection[TResult] = + MongoCollection(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + /** * Gets an estimate of the count of documents in a collection using collection metadata. * diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala index 99fab96d505..0984e95ba32 100644 --- a/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala @@ -23,6 +23,7 @@ import org.mongodb.scala.bson.DefaultHelper.DefaultsTo import org.mongodb.scala.bson.conversions.Bson import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS } import scala.reflect.ClassTag /** @@ -69,6 +70,28 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { */ lazy val readConcern: ReadConcern = wrapped.getReadConcern + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. 
+ * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 4.x + */ + lazy val timeout: Option[Duration] = + Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS)) + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -113,6 +136,19 @@ case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { def withReadConcern(readConcern: ReadConcern): MongoDatabase = MongoDatabase(wrapped.withReadConcern(readConcern)) + /** + * Sets the time limit for the full execution of an operation. + * + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new MongoCollection instance with the set time limit for operations + * @since 4.x + */ + def withTimeout(timeout: Duration): MongoDatabase = + MongoDatabase(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + /** * Gets a collection, with a specific default document class. 
* diff --git a/driver-sync/src/main/com/mongodb/client/MongoCollection.java b/driver-sync/src/main/com/mongodb/client/MongoCollection.java index aa772960e65..6a70b744f7e 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoCollection.java +++ b/driver-sync/src/main/com/mongodb/client/MongoCollection.java @@ -51,6 +51,7 @@ import org.bson.conversions.Bson; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoCollection interface. @@ -112,6 +113,36 @@ public interface MongoCollection { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

<p>If not null the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+ *
+ * <ul>
+ *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *     <ul>
+ *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *       available</li>
+ *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *       See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS/">cursor.maxTimeMS</a>.</li>
+ *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *       See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+ *     </ul>
+ *   </li>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.. * @@ -162,6 +193,22 @@ public interface MongoCollection { */ MongoCollection withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + *
+ * <ul>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCollection instance with the set time limit for the full execution of an operation + * @since 4.x + * @see #getTimeout + */ + MongoCollection withTimeout(long timeout, TimeUnit timeUnit); + /** * Counts the number of documents in the collection. * diff --git a/driver-sync/src/main/com/mongodb/client/MongoDatabase.java b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java index bc881cba1d0..cb2b7f739bc 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoDatabase.java +++ b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java @@ -22,11 +22,13 @@ import com.mongodb.annotations.ThreadSafe; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; import org.bson.conversions.Bson; import java.util.List; +import java.util.concurrent.TimeUnit; /** * The MongoDatabase interface. @@ -76,6 +78,36 @@ public interface MongoDatabase { */ ReadConcern getReadConcern(); + /** + * The time limit for the full execution of an operation. + * + *

<p>If not null the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+ *
+ * <ul>
+ *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *     <ul>
+ *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *       available</li>
+ *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *       See: <a href="https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS/">cursor.maxTimeMS</a>.</li>
+ *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *       See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+ *     </ul>
+ *   </li>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Nullable + Long getTimeout(TimeUnit timeUnit); + /** * Create a new MongoDatabase instance with a different codec registry. * @@ -117,6 +149,22 @@ public interface MongoDatabase { */ MongoDatabase withReadConcern(ReadConcern readConcern); + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * + *
+ * <ul>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoDatabase instance with the set time limit for the full execution of an operation. + * @since 4.x + * @see #getTimeout + */ + MongoDatabase withTimeout(long timeout, TimeUnit timeUnit); + /** * Gets a collection. * @@ -140,6 +188,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param command the command to be run * @return the command result */ @@ -148,6 +198,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command * @return the command result @@ -157,6 +209,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param command the command to be run * @param resultClass the class to decode each document into * @param the type of the class to use instead of {@code Document}. @@ -167,6 +221,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command * @param resultClass the class to decode each document into @@ -178,6 +234,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @return the command result @@ -189,6 +247,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command @@ -201,6 +261,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param resultClass the class to decode each document into @@ -214,6 +276,8 @@ public interface MongoDatabase { /** * Executes the given command in the context of the current database with the given read preference. * + *

<p>Note: If set the {@link #getTimeout} value will overwrite any {@code maxTimeMS} value in the command.</p>
+ * * @param clientSession the client session with which to associate this operation * @param command the command to be run * @param readPreference the {@link ReadPreference} to be used when executing the command diff --git a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java index 6559e029d4e..462e293483d 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java @@ -62,29 +62,25 @@ class AggregateIterableImpl extends MongoIterableImpl documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final List pipeline, final AggregationLevel aggregationLevel) { + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, + final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads, + @Nullable final Long timeoutMS) { this(clientSession, new MongoNamespace(databaseName, "ignored"), documentClass, resultClass, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, aggregationLevel, true); - } - - AggregateIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads) { - this(clientSession, new MongoNamespace(databaseName, "ignored"), documentClass, resultClass, codecRegistry, 
readPreference, - readConcern, writeConcern, executor, pipeline, aggregationLevel, retryReads); + readConcern, writeConcern, executor, pipeline, aggregationLevel, retryReads, timeoutMS); } + @SuppressWarnings("checkstyle:ParameterNumber") AggregateIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, + final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads, + @Nullable final Long timeoutMS) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutMS); this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - true, retryReads); + true, retryReads, timeoutMS); this.namespace = notNull("namespace", namespace); this.documentClass = notNull("documentClass", documentClass); this.resultClass = notNull("resultClass", resultClass); diff --git a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java index d50b20cf0e9..95a972dddad 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java @@ -70,23 +70,23 @@ public class ChangeStreamIterableImpl extends MongoIterableImpl pipeline, final Class resultClass, - final 
ChangeStreamLevel changeStreamLevel, final boolean retryReads) { + final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, + final OperationExecutor executor, final List pipeline, final Class resultClass, + final ChangeStreamLevel changeStreamLevel, final boolean retryReads, @Nullable final Long timeoutMS) { this(clientSession, new MongoNamespace(databaseName, "ignored"), codecRegistry, readPreference, readConcern, executor, pipeline, - resultClass, changeStreamLevel, retryReads); + resultClass, changeStreamLevel, retryReads, timeoutMS); } public ChangeStreamIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final OperationExecutor executor, final List pipeline, final Class resultClass, - final ChangeStreamLevel changeStreamLevel, final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); + final ChangeStreamLevel changeStreamLevel, final boolean retryReads, @Nullable final Long timeoutMS) { + super(clientSession, executor, readConcern, readPreference, retryReads, null); this.codecRegistry = notNull("codecRegistry", codecRegistry); this.pipeline = notNull("pipeline", pipeline); this.codec = ChangeStreamDocument.createCodec(notNull("resultClass", resultClass), codecRegistry); this.changeStreamLevel = notNull("changeStreamLevel", changeStreamLevel); - this.operations = new SyncOperations<>(namespace, resultClass, readPreference, codecRegistry, retryReads); + this.operations = new SyncOperations<>(namespace, resultClass, readPreference, codecRegistry, retryReads, timeoutMS); } @Override @@ -128,7 +128,7 @@ public ChangeStreamIterable collation(@Nullable final Collation collati @Override public MongoIterable withDocumentClass(final Class clazz) { - return new MongoIterableImpl(getClientSession(), getExecutor(), getReadConcern(), 
getReadPreference(), getRetryReads()) { + return new MongoIterableImpl(getClientSession(), getExecutor(), getReadConcern(), getReadPreference(), getRetryReads(), getTimeoutMS()) { @Override public MongoCursor iterator() { return cursor(); diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java index 4a6afe4101b..477ebc2e993 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java @@ -26,6 +26,7 @@ import com.mongodb.WriteConcern; import com.mongodb.client.ClientSession; import com.mongodb.client.TransactionBody; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.operation.AbortTransactionOperation; import com.mongodb.internal.operation.CommitTransactionOperation; import com.mongodb.internal.operation.ReadOperation; @@ -145,10 +146,13 @@ public void commitTransaction() { throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); } commitInProgress = true; - delegate.getOperationExecutor().execute(new CommitTransactionOperation(assertNotNull(transactionOptions.getWriteConcern()), + delegate.getOperationExecutor().execute( + new CommitTransactionOperation( + // TODO (CSOT) - JAVA-4067 + ClientSideOperationTimeouts.withMaxCommitMS(null, transactionOptions.getMaxCommitTime(MILLISECONDS)), + assertNotNull(transactionOptions.getWriteConcern()), transactionState == TransactionState.COMMITTED) - .recoveryToken(getRecoveryToken()) - .maxCommitTime(transactionOptions.getMaxCommitTime(MILLISECONDS), MILLISECONDS), + .recoveryToken(getRecoveryToken()), readConcern, this); } } catch (MongoException e) { @@ -177,9 +181,11 @@ public void abortTransaction() { if (readConcern == null) { throw new MongoInternalException("Invariant violated. 
Transaction options read concern can not be null"); } - delegate.getOperationExecutor().execute(new AbortTransactionOperation(assertNotNull(transactionOptions.getWriteConcern())) - .recoveryToken(getRecoveryToken()), - readConcern, this); + delegate.getOperationExecutor().execute(new AbortTransactionOperation( + // TODO (CSOT) - JAVA-4067 + ClientSideOperationTimeouts.withMaxCommitMS(null, transactionOptions.getMaxCommitTime(MILLISECONDS)), + assertNotNull(transactionOptions.getWriteConcern())) + .recoveryToken(getRecoveryToken()), readConcern, this); } } catch (RuntimeException e) { // ignore exceptions from abort diff --git a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java index 3c4e1d18ea3..d51fe57a0ad 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java @@ -46,19 +46,12 @@ class DistinctIterableImpl extends MongoIterableImpl documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final OperationExecutor executor, final String fieldName, final Bson filter) { - this(clientSession, namespace, documentClass, resultClass, codecRegistry, readPreference, readConcern, executor, fieldName, - filter, true); - } - DistinctIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final OperationExecutor executor, final String fieldName, final Bson filter, - final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); - this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads); + final boolean 
retryReads, @Nullable final Long timeoutMS) { + super(clientSession, executor, readConcern, readPreference, retryReads, null); + this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutMS); this.resultClass = notNull("resultClass", resultClass); this.fieldName = notNull("mapFunction", fieldName); this.filter = filter; diff --git a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java index b7b405d5a5a..6869e27ea97 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java @@ -49,16 +49,11 @@ class FindIterableImpl extends MongoIterableImpl im private Bson filter; FindIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final OperationExecutor executor, final Bson filter) { - this(clientSession, namespace, documentClass, resultClass, codecRegistry, readPreference, readConcern, executor, filter, true); - } - - FindIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final ReadConcern readConcern, final OperationExecutor executor, final Bson filter, final boolean retryReads) { - super(clientSession, executor, readConcern, readPreference, retryReads); - this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads); + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final OperationExecutor executor, final Bson filter, final boolean retryReads, + 
@Nullable final Long timeoutMS) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutMS); + this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutMS); this.resultClass = notNull("resultClass", resultClass); this.filter = notNull("filter", filter); this.findOptions = new FindOptions(); diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java index 305dd3c3182..7a00c64d199 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java @@ -46,17 +46,11 @@ class ListCollectionsIterableImpl extends MongoIterableImpl im private BsonValue comment; ListCollectionsIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final boolean collectionNamesOnly, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final OperationExecutor executor) { - this(clientSession, databaseName, collectionNamesOnly, resultClass, codecRegistry, readPreference, executor, true); - } - - ListCollectionsIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final boolean collectionNamesOnly, - final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final OperationExecutor executor, final boolean retryReads) { - super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); // TODO: read concern? + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final OperationExecutor executor, final boolean retryReads, @Nullable final Long timeoutMS) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutMS); // TODO: read concern? 
this.collectionNamesOnly = collectionNamesOnly; - this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads); + this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutMS); this.databaseName = notNull("databaseName", databaseName); this.resultClass = notNull("resultClass", resultClass); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java index 50c4eb14b4a..2f9722d35ef 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java @@ -48,17 +48,11 @@ public class ListDatabasesIterableImpl extends MongoIterableImpl resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, - final OperationExecutor executor) { - this(clientSession, resultClass, codecRegistry, readPreference, executor, true); - } - public ListDatabasesIterableImpl(@Nullable final ClientSession clientSession, final Class resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, - final OperationExecutor executor, final boolean retryReads) { - super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); // TODO: read concern? - this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads); + final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, + final boolean retryReads, @Nullable final Long timeoutMS) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutMS); // TODO: read concern? 
+ this.operations = new SyncOperations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutMS); this.resultClass = notNull("clazz", resultClass); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java index c2a9d528007..28f103f1bfa 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java @@ -42,15 +42,10 @@ class ListIndexesIterableImpl extends MongoIterableImpl implem private BsonValue comment; ListIndexesIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor) { - this(clientSession, namespace, resultClass, codecRegistry, readPreference, executor, true); - } - - ListIndexesIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class resultClass, - final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, - final boolean retryReads) { - super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads); - this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads); + final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, + final boolean retryReads, @Nullable final Long timeoutMS) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutMS); + this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutMS); this.resultClass = notNull("resultClass", resultClass); } diff --git 
a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java index fc949859530..1e6e9ffbc34 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java @@ -55,11 +55,11 @@ final class ListSearchIndexesIterableImpl extends MongoIterableImpl resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final boolean retryReads) { - super(null, executor, readConcern, readPreference, retryReads); + final boolean retryReads, @Nullable final Long timeoutMS) { + super(null, executor, readConcern, readPreference, retryReads, timeoutMS); this.resultClass = resultClass; - this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads); + this.operations = new SyncOperations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutMS); this.codecRegistry = codecRegistry; } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java index 60f47f71dec..81b2bdffb84 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java @@ -69,10 +69,10 @@ class MapReduceIterableImpl extends MongoIterableImpl documentClass, final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, - final String mapFunction, final String reduceFunction) { - super(clientSession, executor, readConcern, readPreference, false); + final String mapFunction, final String reduceFunction, @Nullable final Long timeoutMS) { + 
super(clientSession, executor, readConcern, readPreference, false, timeoutMS); this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - false, false); + false, false, timeoutMS); this.namespace = notNull("namespace", namespace); this.resultClass = notNull("resultClass", resultClass); this.mapFunction = notNull("mapFunction", mapFunction); diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java index 5c884f66531..9f8cd90b009 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java @@ -93,8 +93,8 @@ public MongoClientImpl(final Cluster cluster, final MongoDriverInformation mongo @Override public MongoDatabase getDatabase(final String databaseName) { return new MongoDatabaseImpl(databaseName, delegate.getCodecRegistry(), settings.getReadPreference(), settings.getWriteConcern(), - settings.getRetryWrites(), settings.getRetryReads(), settings.getReadConcern(), - settings.getUuidRepresentation(), settings.getAutoEncryptionSettings(), delegate.getOperationExecutor()); + settings.getRetryWrites(), settings.getRetryReads(), settings.getReadConcern(), settings.getUuidRepresentation(), + settings.getAutoEncryptionSettings(), getTimeoutMS(), delegate.getOperationExecutor()); } @Override @@ -206,8 +206,8 @@ private ChangeStreamIterable createChangeStreamIterable(@Null final List pipeline, final Class resultClass) { return new ChangeStreamIterableImpl<>(clientSession, "admin", delegate.getCodecRegistry(), settings.getReadPreference(), - settings.getReadConcern(), delegate.getOperationExecutor(), - pipeline, resultClass, ChangeStreamLevel.CLIENT, settings.getRetryReads()); + settings.getReadConcern(), delegate.getOperationExecutor(), pipeline, resultClass, ChangeStreamLevel.CLIENT, + settings.getRetryReads(), 
getTimeoutMS()); } public Cluster getCluster() { @@ -241,7 +241,7 @@ private static StreamFactory getStreamFactory(final MongoClientSettings settings private ListDatabasesIterable createListDatabasesIterable(@Nullable final ClientSession clientSession, final Class clazz) { return new ListDatabasesIterableImpl<>(clientSession, clazz, delegate.getCodecRegistry(), ReadPreference.primary(), - delegate.getOperationExecutor(), settings.getRetryReads()); + delegate.getOperationExecutor(), settings.getRetryReads(), getTimeoutMS()); } private MongoIterable createListDatabaseNamesIterable(@Nullable final ClientSession clientSession) { @@ -263,4 +263,8 @@ public MongoClientSettings getSettings() { public MongoDriverInformation getMongoDriverInformation() { return mongoDriverInformation; } + + private @Nullable Long getTimeoutMS() { + return null; // TODO (CSOT) - JAVA-4064 + } } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java index 98ed5ec334f..6a31ba0c04d 100755 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java @@ -49,17 +49,18 @@ import com.mongodb.client.model.FindOneAndUpdateOptions; import com.mongodb.client.model.IndexModel; import com.mongodb.client.model.IndexOptions; -import com.mongodb.client.model.SearchIndexModel; import com.mongodb.client.model.InsertManyOptions; import com.mongodb.client.model.InsertOneOptions; import com.mongodb.client.model.RenameCollectionOptions; import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.SearchIndexModel; import com.mongodb.client.model.UpdateOptions; import com.mongodb.client.model.WriteModel; import com.mongodb.client.result.DeleteResult; import com.mongodb.client.result.InsertManyResult; import com.mongodb.client.result.InsertOneResult; import 
com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.ClientSideOperationTimeouts; import com.mongodb.internal.bulk.WriteRequest; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; @@ -78,7 +79,9 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.concurrent.TimeUnit; +import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.assertions.Assertions.notNullElements; import static com.mongodb.internal.bulk.WriteRequest.Type.DELETE; @@ -101,12 +104,15 @@ class MongoCollectionImpl implements MongoCollection { private final UuidRepresentation uuidRepresentation; @Nullable private final AutoEncryptionSettings autoEncryptionSettings; + @Nullable + private final Long timeoutMS; private final OperationExecutor executor; MongoCollectionImpl(final MongoNamespace namespace, final Class documentClass, final CodecRegistry codecRegistry, - final ReadPreference readPreference, final WriteConcern writeConcern, final boolean retryWrites, - final boolean retryReads, final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, - @Nullable final AutoEncryptionSettings autoEncryptionSettings, final OperationExecutor executor) { + final ReadPreference readPreference, final WriteConcern writeConcern, final boolean retryWrites, + final boolean retryReads, final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, @Nullable final Long timeoutMS, + final OperationExecutor executor) { this.namespace = notNull("namespace", namespace); this.documentClass = notNull("documentClass", documentClass); this.codecRegistry = notNull("codecRegistry", codecRegistry); @@ -118,8 +124,9 @@ class MongoCollectionImpl implements MongoCollection { this.executor = notNull("executor", 
executor); this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); this.autoEncryptionSettings = autoEncryptionSettings; + this.timeoutMS = timeoutMS; this.operations = new SyncOperations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, - retryWrites, retryReads); + retryWrites, retryReads, timeoutMS); } @Override @@ -152,34 +159,48 @@ public ReadConcern getReadConcern() { return readConcern; } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, TimeUnit.MILLISECONDS); + } + @Override public MongoCollection withDocumentClass(final Class clazz) { return new MongoCollectionImpl<>(namespace, clazz, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override public MongoCollection withCodecRegistry(final CodecRegistry codecRegistry) { return new MongoCollectionImpl<>(namespace, documentClass, withUuidRepresentation(codecRegistry, uuidRepresentation), - readPreference, writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + readPreference, writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override public MongoCollection withReadPreference(final ReadPreference readPreference) { return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override public MongoCollection withWriteConcern(final WriteConcern writeConcern) { return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern,
uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override public MongoCollection withReadConcern(final ReadConcern readConcern) { return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); + } + + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + isTrueArgument("timeout >= 0", timeout >= 0); + notNull("timeUnit", timeUnit); + long timeoutMS = TimeUnit.MILLISECONDS.convert(timeout, timeUnit); + return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override @@ -253,7 +274,7 @@ public DistinctIterable distinct(final ClientSession clientSe private DistinctIterable createDistinctIterable(@Nullable final ClientSession clientSession, final String fieldName, final Bson filter, final Class resultClass) { return new DistinctIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry, - readPreference, readConcern, executor, fieldName, filter, retryReads); + readPreference, readConcern, executor, fieldName, filter, retryReads, null); } @Override @@ -304,7 +325,7 @@ public FindIterable find(final ClientSession clientSession, f private FindIterable createFindIterable(@Nullable final ClientSession clientSession, final Bson filter, final Class resultClass) { return new FindIterableImpl<>(clientSession, namespace, this.documentClass, resultClass, codecRegistry, - readPreference, readConcern, executor, filter, retryReads); + readPreference, readConcern, executor, filter, retryReads, null); } @Override @@ -333,7 +354,7 @@ private
AggregateIterable createAggregateIterable(@Nullable f final List pipeline, final Class resultClass) { return new AggregateIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry, - readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, retryReads); + readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, retryReads, timeoutMS); } @Override @@ -382,7 +403,7 @@ private ChangeStreamIterable createChangeStreamIterable(@Null final List pipeline, final Class resultClass) { return new ChangeStreamIterableImpl<>(clientSession, namespace, codecRegistry, readPreference, readConcern, executor, - pipeline, resultClass, ChangeStreamLevel.COLLECTION, retryReads); + pipeline, resultClass, ChangeStreamLevel.COLLECTION, retryReads, null); } @SuppressWarnings("deprecation") @@ -418,7 +439,7 @@ private com.mongodb.client.MapReduceIterable createMapReduceI final String mapFunction, final String reduceFunction, final Class resultClass) { return new MapReduceIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry, - readPreference, readConcern, writeConcern, executor, mapFunction, reduceFunction); + readPreference, readConcern, writeConcern, executor, mapFunction, reduceFunction, timeoutMS); } @Override @@ -943,12 +964,12 @@ public ListIndexesIterable listIndexes(final ClientSession cl private ListIndexesIterable createListIndexesIterable(@Nullable final ClientSession clientSession, final Class resultClass) { return new ListIndexesIterableImpl<>(clientSession, getNamespace(), resultClass, codecRegistry, ReadPreference.primary(), - executor, retryReads); + executor, retryReads, timeoutMS); } private ListSearchIndexesIterable createListSearchIndexesIterable(final Class resultClass) { - return new ListSearchIndexesIterableImpl<>(getNamespace(), executor, readConcern, - resultClass, codecRegistry, readPreference, retryReads); + return new 
ListSearchIndexesIterableImpl<>(getNamespace(), executor, readConcern, resultClass, codecRegistry, readPreference, + retryReads, timeoutMS); } @Override @@ -1048,7 +1069,8 @@ public void renameCollection(final ClientSession clientSession, final MongoNames private void executeRenameCollection(@Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace, final RenameCollectionOptions renameCollectionOptions) { - executor.execute(new RenameCollectionOperation(getNamespace(), newCollectionNamespace, writeConcern) + executor.execute(new RenameCollectionOperation(ClientSideOperationTimeouts.create(timeoutMS), getNamespace(), + newCollectionNamespace, writeConcern) .dropTarget(renameCollectionOptions.isDropTarget()), readConcern, clientSession); } diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java index f950ad2432b..d0dee66cf0c 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java @@ -43,10 +43,13 @@ import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static com.mongodb.MongoNamespace.COMMAND_COLLECTION_NAME; import static com.mongodb.MongoNamespace.checkDatabaseNameValidity; +import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; /** @@ -60,16 +63,19 @@ public class MongoDatabaseImpl implements MongoDatabase { private final boolean retryWrites; private final boolean retryReads; private final ReadConcern readConcern; + private final UuidRepresentation uuidRepresentation; @Nullable private final AutoEncryptionSettings autoEncryptionSettings; + @Nullable + private final Long 
timeoutMS; private final OperationExecutor executor; - private final UuidRepresentation uuidRepresentation; private final SyncOperations operations; public MongoDatabaseImpl(final String name, final CodecRegistry codecRegistry, final ReadPreference readPreference, - final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, - final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, - @Nullable final AutoEncryptionSettings autoEncryptionSettings, final OperationExecutor executor) { + final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, + final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, @Nullable final Long timeoutMS, + final OperationExecutor executor) { checkDatabaseNameValidity(name); this.name = notNull("name", name); this.codecRegistry = notNull("codecRegistry", codecRegistry); @@ -80,9 +86,10 @@ public MongoDatabaseImpl(final String name, final CodecRegistry codecRegistry, f this.readConcern = notNull("readConcern", readConcern); this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); this.autoEncryptionSettings = autoEncryptionSettings; + this.timeoutMS = timeoutMS; this.executor = notNull("executor", executor); this.operations = new SyncOperations<>(new MongoNamespace(name, COMMAND_COLLECTION_NAME), BsonDocument.class, readPreference, - codecRegistry, readConcern, writeConcern, retryWrites, retryReads); + codecRegistry, readConcern, writeConcern, retryWrites, retryReads, timeoutMS); } @Override @@ -110,28 +117,42 @@ public ReadConcern getReadConcern() { return readConcern; } + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return timeoutMS == null ? 
null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + @Override public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) { return new MongoDatabaseImpl(name, withUuidRepresentation(codecRegistry, uuidRepresentation), readPreference, writeConcern, retryWrites, - retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override public MongoDatabase withReadPreference(final ReadPreference readPreference) { return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override public MongoDatabase withWriteConcern(final WriteConcern writeConcern) { return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override public MongoDatabase withReadConcern(final ReadConcern readConcern) { return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, - uuidRepresentation, autoEncryptionSettings, executor); + uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); + } + + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + isTrueArgument("timeout >= 0", timeout >= 0); + notNull("timeUnit", timeUnit); + long timeoutMS = TimeUnit.MILLISECONDS.convert(timeout, timeUnit); + return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override @@ -142,7 +163,7 @@ public MongoCollection getCollection(final String collectionName) {
@Override public MongoCollection getCollection(final String collectionName, final Class documentClass) { return new MongoCollectionImpl<>(new MongoNamespace(name, collectionName), documentClass, codecRegistry, readPreference, - writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, executor); + writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutMS, executor); } @Override @@ -193,7 +214,8 @@ private TResult executeCommand(@Nullable final ClientSession clientSes if (clientSession != null && clientSession.hasActiveTransaction() && !readPreference.equals(ReadPreference.primary())) { throw new MongoClientException("Read preference in a transaction must be primary"); } - return executor.execute(operations.commandRead(command, resultClass), readPreference, readConcern, clientSession); + return executor.execute(operations.commandRead(command, resultClass), + readPreference, readConcern, clientSession); } @Override @@ -252,7 +274,7 @@ private ListCollectionsIterable createListCollectionsIterable final Class resultClass, final boolean collectionNamesOnly) { return new ListCollectionsIterableImpl<>(clientSession, name, collectionNamesOnly, resultClass, codecRegistry, - ReadPreference.primary(), executor, retryReads); + ReadPreference.primary(), executor, retryReads, timeoutMS); } @Override @@ -375,14 +397,14 @@ private AggregateIterable createAggregateIterable(@Nullable f final List pipeline, final Class resultClass) { return new AggregateIterableImpl<>(clientSession, name, Document.class, resultClass, codecRegistry, - readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, retryReads); + readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, retryReads, null); } private ChangeStreamIterable createChangeStreamIterable(@Nullable final ClientSession clientSession, final List pipeline, final Class resultClass) { return 
new ChangeStreamIterableImpl<>(clientSession, name, codecRegistry, readPreference, readConcern, executor, - pipeline, resultClass, ChangeStreamLevel.DATABASE, retryReads); + pipeline, resultClass, ChangeStreamLevel.DATABASE, retryReads, timeoutMS); } private void executeCreateView(@Nullable final ClientSession clientSession, final String viewName, final String viewOn, diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java index 86c2e7b99eb..59ad0208e9d 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java @@ -40,15 +40,17 @@ public abstract class MongoIterableImpl implements MongoIterable> asReadOperation(); @@ -74,6 +76,11 @@ protected boolean getRetryReads() { return retryReads; } + @Nullable + protected Long getTimeoutMS() { + return timeoutMS; + } + @Nullable public Integer getBatchSize() { return batchSize; diff --git a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy index 80eced15c60..5c59579f507 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy @@ -75,7 +75,7 @@ class MongoClientSpecification extends Specification { where: expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(codecRegistry, UNSPECIFIED), secondary(), - WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, new TestOperationExecutor([])) + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, null, new TestOperationExecutor([])) } def 'should use ListDatabasesIterableImpl correctly'() { @@ -90,14 +90,14 @@ class MongoClientSpecification extends Specification { then: expect listDatabasesIterable, 
isTheSameAs(new ListDatabasesIterableImpl<>(session, Document, - withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true)) + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, null)) when: listDatabasesIterable = execute(listDatabasesMethod, session, BsonDocument) then: expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, - withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true)) + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, null)) when: def listDatabaseNamesIterable = execute(listDatabasesNamesMethod, session) as MongoIterable @@ -105,7 +105,7 @@ class MongoClientSpecification extends Specification { then: // listDatabaseNamesIterable is an instance of a MappingIterable, so have to get the mapped iterable inside it expect listDatabaseNamesIterable.getMapped(), isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, - withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true).nameOnly(true)) + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, null).nameOnly(true)) cleanup: client?.close() @@ -134,7 +134,7 @@ class MongoClientSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), - readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true), + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true, null), ['codec']) when: @@ -144,7 +144,7 @@ class MongoClientSpecification extends Specification { expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), readPreference, readConcern, executor, [new 
Document('$match', 1)], Document, ChangeStreamLevel.CLIENT, - true), ['codec']) + true, null), ['codec']) when: changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) @@ -153,7 +153,7 @@ class MongoClientSpecification extends Specification { expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, - ChangeStreamLevel.CLIENT, true), ['codec']) + ChangeStreamLevel.CLIENT, true, null), ['codec']) where: session << [null, Stub(ClientSession)] diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy index 32c03ce2bbc..b37fe66d21e 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy @@ -45,6 +45,7 @@ import org.bson.types.ObjectId import spock.lang.Specification import spock.lang.Unroll +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.primary import static com.mongodb.ReadPreference.secondary @@ -60,7 +61,7 @@ class GridFSBucketSpecification extends Specification { def database = databaseWithExecutor(Stub(OperationExecutor)) def databaseWithExecutor(OperationExecutor executor) { new MongoDatabaseImpl('test', registry, primary(), WriteConcern.ACKNOWLEDGED, false, false, readConcern, - JAVA_LEGACY, null, executor) + JAVA_LEGACY, null, null, executor) } def 'should return the correct bucket name'() { @@ -155,7 +156,7 @@ class GridFSBucketSpecification extends Specification { given: def defaultChunkSizeBytes = 255 * 1024 def database = new MongoDatabaseImpl('test', fromProviders(new 
DocumentCodecProvider()), secondary(), WriteConcern.ACKNOWLEDGED, - false, false, readConcern, JAVA_LEGACY, null, new TestOperationExecutor([])) + false, false, readConcern, JAVA_LEGACY, null, null, new TestOperationExecutor([])) when: def gridFSBucket = new GridFSBucketImpl(database) @@ -583,8 +584,8 @@ class GridFSBucketSpecification extends Specification { then: executor.getReadPreference() == primary() - expect executor.getReadOperation(), isTheSameAs(new FindOperation(new MongoNamespace('test.fs.files'), decoder) - .filter(new BsonDocument())) + expect executor.getReadOperation(), isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), + new MongoNamespace('test.fs.files'), decoder).filter(new BsonDocument())) when: def filter = new BsonDocument('filename', new BsonString('filename')) @@ -593,8 +594,8 @@ class GridFSBucketSpecification extends Specification { then: executor.getReadPreference() == secondary() - expect executor.getReadOperation(), isTheSameAs(new FindOperation(new MongoNamespace('test.fs.files'), decoder) - .filter(filter)) + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(CSOT_NO_TIMEOUT.get(), new MongoNamespace('test.fs.files'), decoder).filter(filter)) } def 'should throw an exception if file not found when opening by name'() { diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy index d8b109b1f4b..7dc948a6ab2 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy @@ -35,7 +35,7 @@ class GridFSBucketsSpecification extends Specification { def 'should create a GridFSBucket with default bucket name'() { given: def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern, - JAVA_LEGACY, null, 
Stub(OperationExecutor)) + JAVA_LEGACY, null, null, Stub(OperationExecutor)) when: def gridFSBucket = GridFSBuckets.create(database) @@ -48,7 +48,7 @@ class GridFSBucketsSpecification extends Specification { def 'should create a GridFSBucket with custom bucket name'() { given: def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern, - JAVA_LEGACY, null, Stub(OperationExecutor)) + JAVA_LEGACY, null, null, Stub(OperationExecutor)) def customName = 'custom' when: diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy index e0686420665..b9c242c537d 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy @@ -38,6 +38,8 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -56,7 +58,7 @@ class GridFSFindIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def underlying = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, executor, - new Document()) + new Document(), true, null) def findIterable = new GridFSFindIterableImpl(underlying) when: 'default input should be as expected' @@ -66,14 +68,14 @@ class GridFSFindIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) + expect operation, 
isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, gridFSFileCodec) .filter(new BsonDocument()).retryReads(true)) readPreference == secondary() when: 'overriding initial options' findIterable.filter(new Document('filter', 2)) .sort(new Document('sort', 2)) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .batchSize(99) .limit(99) .skip(9) @@ -84,10 +86,9 @@ class GridFSFindIterableSpecification extends Specification { operation = executor.getReadOperation() as FindOperation then: 'should use the overrides' - expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) + expect operation, isTheSameAs(new FindOperation(CSOT_MAX_TIME.get(), namespace, gridFSFileCodec) .filter(new BsonDocument('filter', new BsonInt32(2))) .sort(new BsonDocument('sort', new BsonInt32(2))) - .maxTime(999, MILLISECONDS) .batchSize(99) .limit(99) .skip(9) @@ -101,7 +102,7 @@ class GridFSFindIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def findIterable = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1)) + executor, new Document('filter', 1), true, null) when: findIterable.filter(new Document('filter', 1)) @@ -111,7 +112,7 @@ class GridFSFindIterableSpecification extends Specification { def operation = executor.getReadOperation() as FindOperation then: - expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) + expect operation, isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, gridFSFileCodec) .filter(new BsonDocument('filter', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .cursorType(CursorType.NonTailable) @@ -148,7 +149,7 @@ class GridFSFindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def underlying = new FindIterableImpl(null, namespace, GridFSFile, 
GridFSFile, codecRegistry, readPreference, readConcern, executor, - new Document()) + new Document(), true, null) def mongoIterable = new GridFSFindIterableImpl(underlying) when: diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy index 64bbae0ad1f..932a0000b69 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy @@ -41,6 +41,9 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME_AND_MAX_AWAIT_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -62,7 +65,7 @@ class AggregateIterableSpecification extends Specification { def pipeline = [new Document('$match', 1)] def aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, - true) + true, null) when: 'default input should be as expected' aggregationIterable.iterator() @@ -71,15 +74,15 @@ class AggregateIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new AggregateOperation(namespace, + expect operation, isTheSameAs(new AggregateOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) .retryReads(true)) readPreference == secondary() when: 'overriding initial options' aggregationIterable - .maxAwaitTime(99, MILLISECONDS) - .maxTime(999, MILLISECONDS) + 
.maxAwaitTime(1001, MILLISECONDS) + .maxTime(101, MILLISECONDS) .collation(collation) .hint(new Document('a', 1)) .comment('this is a comment') @@ -88,18 +91,16 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateOperation(namespace, + expect operation, isTheSameAs(new AggregateOperation(CSOT_MAX_TIME_AND_MAX_AWAIT_TIME.get(), namespace, [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) .retryReads(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) - .comment(new BsonString('this is a comment')) - .maxAwaitTime(99, MILLISECONDS) - .maxTime(999, MILLISECONDS)) + .comment(new BsonString('this is a comment'))) when: 'both hint and hint string are set' aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, null) aggregationIterable .hint(new Document('a', 1)) @@ -109,7 +110,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateOperation then: 'should use hint not hint string' - expect operation, isTheSameAs(new AggregateOperation(namespace, + expect operation, isTheSameAs(new AggregateOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) .hint(new BsonDocument('a', new BsonInt32(1)))) } @@ -123,9 +124,8 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, null) 
.batchSize(99) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -135,10 +135,9 @@ class AggregateIterableSpecification extends Specification { def operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.COLLECTION) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -152,14 +151,12 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'aggregation includes $out and is at the database level' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.DATABASE, false) + pipeline, AggregationLevel.DATABASE, false, null) .batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -169,11 +166,10 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_MAX_TIME.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, 
AggregationLevel.DATABASE) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -187,13 +183,11 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 operation.isAllowDiskUse() == null when: 'toCollection should work as expected' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, null) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -203,7 +197,7 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern) .allowDiskUse(true) @@ -220,13 +214,13 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out and hint string' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, null) .hintString('x_1').iterator() def operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, 
[new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.COLLECTION) .hint(new BsonString('x_1'))) @@ -234,14 +228,14 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out and hint and hint string' executor = new TestOperationExecutor([null, null, null, null, null]) new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, null) .hint(new BsonDocument('x', new BsonInt32(1))) .hintString('x_1').iterator() operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the hint not the hint string' - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], readConcern, writeConcern, AggregationLevel.COLLECTION) .hint(new BsonDocument('x', new BsonInt32(1)))) @@ -258,9 +252,8 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $merge' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, null) .batchSize(99) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -269,12 +262,11 @@ class AggregateIterableSpecification extends Specification { def operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new 
AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern, AggregationLevel.COLLECTION) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -288,14 +280,12 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'aggregation includes $merge into a different database' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipelineWithIntoDocument, AggregationLevel.COLLECTION, false) + pipelineWithIntoDocument, AggregationLevel.COLLECTION, false, null) .batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -304,13 +294,12 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_MAX_TIME.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonDocument('db', new BsonString('db2')).append('coll', new BsonString(collectionName))))], readConcern, writeConcern, AggregationLevel.COLLECTION) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -324,14 +313,12 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == new 
MongoNamespace('db2', collectionName) operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'aggregation includes $merge and is at the database level' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.DATABASE, false) + pipeline, AggregationLevel.DATABASE, false, null) .batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -340,12 +327,11 @@ class AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: 'should use the overrides' - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_MAX_TIME.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern, AggregationLevel.DATABASE) - .maxTime(999, MILLISECONDS) .allowDiskUse(true) .collation(collation) .hint(new BsonDocument('a', new BsonInt32(1))) @@ -359,12 +345,10 @@ class AggregateIterableSpecification extends Specification { operation.getNamespace() == collectionNamespace operation.getBatchSize() == 99 operation.getCollation() == collation - operation.getMaxAwaitTime(MILLISECONDS) == 0 - operation.getMaxTime(MILLISECONDS) == 0 when: 'toCollection should work as expected' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, null) .allowDiskUse(true) .collation(collation) .hint(new Document('a', 1)) @@ -374,7 +358,7 @@ class 
AggregateIterableSpecification extends Specification { operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], readConcern, writeConcern) @@ -393,14 +377,14 @@ class AggregateIterableSpecification extends Specification { when: new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false) + pipeline, AggregationLevel.COLLECTION, false, null) .iterator() def operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, pipeline, readConcern, writeConcern, - AggregationLevel.COLLECTION)) + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, pipeline, readConcern, + writeConcern, AggregationLevel.COLLECTION)) when: operation = executor.getReadOperation() as FindOperation @@ -436,13 +420,13 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out' def aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, null) aggregateIterable.toCollection() def operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new 
BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], readConcern, writeConcern, AggregationLevel.COLLECTION) ) @@ -455,13 +439,13 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out and is at the database level' aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, false, null) aggregateIterable.toCollection() operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], readConcern, writeConcern, AggregationLevel.DATABASE) ) @@ -474,13 +458,13 @@ class AggregateIterableSpecification extends Specification { when: 'toCollection should work as expected' aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, null) aggregateIterable.toCollection() operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], readConcern, writeConcern)) @@ -492,13 +476,13 @@ class AggregateIterableSpecification extends Specification { when: 'aggregation includes $out with namespace' aggregateIterable = new 
AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, outWithDBpipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, outWithDBpipeline, AggregationLevel.COLLECTION, false, null) aggregateIterable.toCollection() operation = executor.getReadOperation() as AggregateToCollectionOperation then: - expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + expect operation, isTheSameAs(new AggregateToCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {db: "testDB", coll: "testCollection"}}')], readConcern, writeConcern)) @@ -519,7 +503,7 @@ class AggregateIterableSpecification extends Specification { def executor = new TestOperationExecutor([batchCursor, batchCursor]) def pipeline = [new Document('$match', 1)] def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, null) when: aggregationIterable.first() @@ -545,7 +529,7 @@ class AggregateIterableSpecification extends Specification { def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null]) def pipeline = [new Document('$match', 1), new Document('$out', 'collName')] def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, null) when: aggregationIterable.first() @@ -576,7 +560,7 @@ class AggregateIterableSpecification extends Specification { def executor = new TestOperationExecutor([new MongoException('failure')]) def 
pipeline = [new BsonDocument('$match', new BsonInt32(1))] def aggregationIterable = new AggregateIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference, - readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, null) when: 'The operation fails with an exception' aggregationIterable.iterator() @@ -592,14 +576,14 @@ class AggregateIterableSpecification extends Specification { when: 'a codec is missing' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - pipeline, AggregationLevel.COLLECTION, false).iterator() + pipeline, AggregationLevel.COLLECTION, false, null).iterator() then: thrown(CodecConfigurationException) when: 'pipeline contains null' new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - [null], AggregationLevel.COLLECTION, false).iterator() + [null], AggregationLevel.COLLECTION, false, null).iterator() then: thrown(IllegalArgumentException) @@ -627,7 +611,7 @@ class AggregateIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, [new Document('$match', 1)], AggregationLevel.COLLECTION, false) + readConcern, writeConcern, executor, [new Document('$match', 1)], AggregationLevel.COLLECTION, false, null) when: def results = mongoIterable.first() @@ -672,7 +656,7 @@ class AggregateIterableSpecification extends Specification { def batchSize = 5 def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, Stub(OperationExecutor), [new Document('$match', 1)], 
AggregationLevel.COLLECTION, - false) + false, null) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy index 7141db09c43..56147a8c147 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy @@ -20,7 +20,6 @@ import com.mongodb.Function import com.mongodb.MongoException import com.mongodb.MongoNamespace import com.mongodb.ReadConcern -import com.mongodb.WriteConcern import com.mongodb.client.ClientSession import com.mongodb.client.model.Collation import com.mongodb.client.model.changestream.ChangeStreamDocument @@ -43,6 +42,8 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_AWAIT_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -54,7 +55,6 @@ class ChangeStreamIterableSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) def readPreference = secondary() def readConcern = ReadConcern.MAJORITY - def writeConcern = WriteConcern.MAJORITY def collation = Collation.builder().locale('en').build() def 'should build the expected ChangeStreamOperation'() { @@ -62,7 +62,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([null, null, null, null, null]) def pipeline = [new Document('$match', 1)] def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, - executor, 
pipeline, Document, ChangeStreamLevel.COLLECTION, true) + executor, pipeline, Document, ChangeStreamLevel.COLLECTION, true, null) when: 'default input should be as expected' changeStreamIterable.iterator() @@ -72,14 +72,17 @@ class ChangeStreamIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ChangeStreamOperation(namespace, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, - [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION).retryReads(true)) + expect operation, isTheSameAs(new ChangeStreamOperation(CSOT_NO_TIMEOUT.get(), namespace, + FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [BsonDocument.parse('{$match: 1}')], codec, + ChangeStreamLevel.COLLECTION) + .retryReads(true)) readPreference == secondary() when: 'overriding initial options' def resumeToken = RawBsonDocument.parse('{_id: {a: 1}}') def startAtOperationTime = new BsonTimestamp(99) - changeStreamIterable.collation(collation).maxAwaitTime(99, MILLISECONDS) + changeStreamIterable.collation(collation) + .maxAwaitTime(101, MILLISECONDS) .fullDocument(FullDocument.UPDATE_LOOKUP) .fullDocumentBeforeChange(FullDocumentBeforeChange.WHEN_AVAILABLE) .resumeAfter(resumeToken).startAtOperationTime(startAtOperationTime) @@ -88,12 +91,14 @@ class ChangeStreamIterableSpecification extends Specification { operation = executor.getReadOperation() as ChangeStreamOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ChangeStreamOperation(namespace, FullDocument.UPDATE_LOOKUP, - FullDocumentBeforeChange.WHEN_AVAILABLE, - [BsonDocument.parse('{$match: 1}')], codec, ChangeStreamLevel.COLLECTION) + expect operation, isTheSameAs(new ChangeStreamOperation(CSOT_MAX_AWAIT_TIME.get(), namespace, + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.WHEN_AVAILABLE, [BsonDocument.parse('{$match: 1}')], codec, + ChangeStreamLevel.COLLECTION) .retryReads(true) - 
.collation(collation).maxAwaitTime(99, MILLISECONDS) - .resumeAfter(resumeToken).startAtOperationTime(startAtOperationTime).startAfter(resumeToken)) + .collation(collation) + .resumeAfter(resumeToken) + .startAtOperationTime(startAtOperationTime) + .startAfter(resumeToken)) } def 'should use ClientSession'() { @@ -103,7 +108,7 @@ class ChangeStreamIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def changeStreamIterable = new ChangeStreamIterableImpl(clientSession, namespace, codecRegistry, readPreference, readConcern, - executor, [], Document, ChangeStreamLevel.COLLECTION, true) + executor, [], Document, ChangeStreamLevel.COLLECTION, true, null) when: changeStreamIterable.first() @@ -127,7 +132,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([new MongoException('failure')]) def pipeline = [new BsonDocument('$match', new BsonInt32(1))] def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, - executor, pipeline, BsonDocument, ChangeStreamLevel.COLLECTION, true) + executor, pipeline, BsonDocument, ChangeStreamLevel.COLLECTION, true, null) when: 'The operation fails with an exception' changeStreamIterable.iterator() @@ -137,14 +142,14 @@ class ChangeStreamIterableSpecification extends Specification { when: 'a codec is missing' new ChangeStreamIterableImpl(null, namespace, altRegistry, readPreference, readConcern, executor, pipeline, Document, - ChangeStreamLevel.COLLECTION, true).iterator() + ChangeStreamLevel.COLLECTION, true, null).iterator() then: thrown(CodecConfigurationException) when: 'pipeline contains null' new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [null], Document, - ChangeStreamLevel.COLLECTION, true).iterator() + ChangeStreamLevel.COLLECTION, true, null).iterator() then: thrown(IllegalArgumentException) @@ 
-159,7 +164,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([cursor(cannedResults), cursor(cannedResults), cursor(cannedResults), cursor(cannedResults)]) def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [], - Document, ChangeStreamLevel.COLLECTION, true) + Document, ChangeStreamLevel.COLLECTION, true, null) when: def results = mongoIterable.first() @@ -207,7 +212,7 @@ class ChangeStreamIterableSpecification extends Specification { def executor = new TestOperationExecutor([cursor(cannedResults), cursor(cannedResults), cursor(cannedResults), cursor(cannedResults)]) def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [], - Document, ChangeStreamLevel.COLLECTION, true).withDocumentClass(RawBsonDocument) + Document, ChangeStreamLevel.COLLECTION, true, null).withDocumentClass(RawBsonDocument) when: def results = mongoIterable.first() @@ -251,7 +256,7 @@ class ChangeStreamIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, - Stub(OperationExecutor), [BsonDocument.parse('{$match: 1}')], BsonDocument, ChangeStreamLevel.COLLECTION, true) + Stub(OperationExecutor), [BsonDocument.parse('{$match: 1}')], BsonDocument, ChangeStreamLevel.COLLECTION, true, null) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy index 8a7898581a2..f3cbd217107 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy @@ -37,6 +37,8 @@ import 
spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -55,7 +57,7 @@ class DistinctIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def distinctIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, 'field', new BsonDocument(), true) + executor, 'field', new BsonDocument(), true, null) when: 'default input should be as expected' distinctIterable.iterator() @@ -64,19 +66,18 @@ class DistinctIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new DistinctOperation(namespace, 'field', new DocumentCodec()) + expect operation, isTheSameAs(new DistinctOperation(CSOT_NO_TIMEOUT.get(), namespace, 'field', new DocumentCodec()) .filter(new BsonDocument()).retryReads(true)) readPreference == secondary() when: 'overriding initial options' - distinctIterable.filter(new Document('field', 1)).maxTime(999, MILLISECONDS).batchSize(99).collation(collation).iterator() + distinctIterable.filter(new Document('field', 1)).maxTime(100, MILLISECONDS).batchSize(99).collation(collation).iterator() operation = executor.getReadOperation() as DistinctOperation then: 'should use the overrides' - expect operation, isTheSameAs(new DistinctOperation(namespace, 'field', new DocumentCodec()) - .filter(new BsonDocument('field', new BsonInt32(1))) - .maxTime(999, MILLISECONDS).collation(collation).retryReads(true)) + expect operation, isTheSameAs(new DistinctOperation(CSOT_MAX_TIME.get(), namespace, 'field', new DocumentCodec()) + .filter(new BsonDocument('field', new 
BsonInt32(1))).collation(collation).retryReads(true)) } def 'should use ClientSession'() { @@ -86,7 +87,7 @@ class DistinctIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def distinctIterable = new DistinctIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, executor, 'field', new BsonDocument()) + readConcern, executor, 'field', new BsonDocument(), true, null) when: distinctIterable.first() @@ -109,7 +110,7 @@ class DistinctIterableSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) def executor = new TestOperationExecutor([new MongoException('failure')]) def distinctIterable = new DistinctIterableImpl(null, namespace, Document, BsonDocument, codecRegistry, readPreference, - readConcern, executor, 'field', new BsonDocument()) + readConcern, executor, 'field', new BsonDocument(), true, null) when: 'The operation fails with an exception' distinctIterable.iterator() @@ -145,7 +146,7 @@ class DistinctIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, ReadConcern.LOCAL, - executor, 'field', new BsonDocument()) + executor, 'field', new BsonDocument(), true, null) when: def results = mongoIterable.first() @@ -189,7 +190,7 @@ class DistinctIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - Stub(OperationExecutor), 'field', new BsonDocument()) + Stub(OperationExecutor), 'field', new BsonDocument(), true, null) then: mongoIterable.getBatchSize() == null diff --git 
a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy index ebfe762cf90..91d68ef1e8c 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy @@ -16,7 +16,6 @@ package com.mongodb.client.internal - import com.mongodb.CursorType import com.mongodb.Function import com.mongodb.MongoException @@ -39,10 +38,11 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME_AND_MAX_AWAIT_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS -import static java.util.concurrent.TimeUnit.SECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders import static spock.util.matcher.HamcrestSupport.expect @@ -59,11 +59,9 @@ class FindIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null]) def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1), true) + executor, new Document('filter', 1), true, null) .sort(new Document('sort', 1)) .projection(new Document('projection', 1)) - .maxTime(10, SECONDS) - .maxAwaitTime(20, SECONDS) .batchSize(100) .limit(100) .skip(10) @@ -87,12 +85,10 @@ class FindIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) + expect operation, isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, new DocumentCodec()) .filter(new BsonDocument('filter', new 
BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(10000, MILLISECONDS) - .maxAwaitTime(20000, MILLISECONDS) .batchSize(100) .limit(100) .skip(10) @@ -112,8 +108,8 @@ class FindIterableSpecification extends Specification { findIterable.filter(new Document('filter', 2)) .sort(new Document('sort', 2)) .projection(new Document('projection', 2)) - .maxTime(9, SECONDS) - .maxAwaitTime(18, SECONDS) + .maxTime(101, MILLISECONDS) + .maxAwaitTime(1001, MILLISECONDS) .batchSize(99) .limit(99) .skip(9) @@ -134,12 +130,10 @@ class FindIterableSpecification extends Specification { operation = executor.getReadOperation() as FindOperation then: 'should use the overrides' - expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) + expect operation, isTheSameAs(new FindOperation(CSOT_MAX_TIME_AND_MAX_AWAIT_TIME.get(), namespace, new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(2))) .sort(new BsonDocument('sort', new BsonInt32(2))) .projection(new BsonDocument('projection', new BsonInt32(2))) - .maxTime(9000, MILLISECONDS) - .maxAwaitTime(18000, MILLISECONDS) .batchSize(99) .limit(99) .skip(9) @@ -160,7 +154,7 @@ class FindIterableSpecification extends Specification { when: 'passing nulls to nullable methods' new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1), true) + executor, new Document('filter', 1), true, null) .filter(null as Bson) .collation(null) .projection(null) @@ -174,7 +168,7 @@ class FindIterableSpecification extends Specification { operation = executor.getReadOperation() as FindOperation then: 'should set an empty doc for the filter' - expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) + expect operation, isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, new DocumentCodec()) .filter(new 
BsonDocument()).retryReads(true)) } @@ -185,7 +179,7 @@ class FindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def findIterable = new FindIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1)) + executor, new Document('filter', 1), true, null) when: findIterable.first() @@ -207,7 +201,7 @@ class FindIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document('filter', 1), true) + executor, new Document('filter', 1), true, null) when: findIterable.filter(new Document('filter', 1)) @@ -217,7 +211,7 @@ class FindIterableSpecification extends Specification { def operation = executor.getReadOperation() as FindOperation then: - expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) + expect operation, isTheSameAs(new FindOperation(CSOT_NO_TIMEOUT.get(), namespace, new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .cursorType(CursorType.NonTailable) @@ -247,7 +241,7 @@ class FindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document()) + executor, new Document(), true, null) when: def results = mongoIterable.first() @@ -291,7 +285,7 @@ class FindIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, Stub(OperationExecutor), new Document()) + readConcern, 
Stub(OperationExecutor), new Document(), true, null) then: mongoIterable.getBatchSize() == null @@ -313,7 +307,7 @@ class FindIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor]) def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - executor, new Document()) + executor, new Document(), true, null) when: mongoIterable.forEach(new Consumer() { diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy index 1719cf9c21b..dcccfcbf006 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy @@ -30,11 +30,13 @@ import org.bson.codecs.DocumentCodecProvider import org.bson.codecs.ValueCodecProvider import spock.lang.Specification +import java.util.concurrent.TimeUnit import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary -import static java.util.concurrent.TimeUnit.MILLISECONDS import static org.bson.codecs.configuration.CodecRegistries.fromProviders import static spock.util.matcher.HamcrestSupport.expect @@ -48,12 +50,11 @@ class ListCollectionsIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null]) def listCollectionIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, - readPreference, executor) + readPreference, executor, true, null) .filter(new Document('filter', 1)) .batchSize(100) - .maxTime(1000, MILLISECONDS) def listCollectionNamesIterable = new 
ListCollectionsIterableImpl(null, 'db', true, Document, codecRegistry, - readPreference, executor) + readPreference, executor, true, null) when: 'default input should be as expected' listCollectionIterable.iterator() @@ -62,19 +63,19 @@ class ListCollectionsIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) - .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100).maxTime(1000, MILLISECONDS) + expect operation, isTheSameAs(new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), 'db', new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100) .retryReads(true)) readPreference == secondary() when: 'overriding initial options' - listCollectionIterable.filter(new Document('filter', 2)).batchSize(99).maxTime(999, MILLISECONDS).iterator() + listCollectionIterable.filter(new Document('filter', 2)).batchSize(99).maxTime(100, TimeUnit.MILLISECONDS).iterator() operation = executor.getReadOperation() as ListCollectionsOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) - .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99).maxTime(999, MILLISECONDS) + expect operation, isTheSameAs(new ListCollectionsOperation(CSOT_MAX_TIME.get(), 'db', new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99) .retryReads(true)) when: 'requesting collection names only' @@ -83,8 +84,8 @@ class ListCollectionsIterableSpecification extends Specification { operation = executor.getReadOperation() as ListCollectionsOperation then: 'should create operation with nameOnly' - expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()).nameOnly(true) - .retryReads(true)) + expect operation, isTheSameAs(new ListCollectionsOperation(CSOT_NO_TIMEOUT.get(), 'db', new 
DocumentCodec()) + .nameOnly(true).retryReads(true)) } def 'should use ClientSession'() { @@ -94,7 +95,7 @@ class ListCollectionsIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def listCollectionIterable = new ListCollectionsIterableImpl(clientSession, 'db', false, Document, codecRegistry, - readPreference, executor) + readPreference, executor, true, null) when: listCollectionIterable.first() @@ -134,7 +135,7 @@ class ListCollectionsIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, readPreference, - executor) + executor, true, null) when: def results = mongoIterable.first() @@ -178,7 +179,7 @@ class ListCollectionsIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, readPreference, - Stub(OperationExecutor)) + Stub(OperationExecutor), true, null) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy index bfe4adb26f9..5f7656a5d9d 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy @@ -30,6 +30,8 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -45,8 +47,8 @@ class 
ListDatabasesIterableSpecification extends Specification { def 'should build the expected listCollectionOperation'() { given: def executor = new TestOperationExecutor([null, null, null]) - def listDatabaseIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor) - .maxTime(1000, MILLISECONDS) + def listDatabaseIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor, true, + null) when: 'default input should be as expected' listDatabaseIterable.iterator() @@ -55,26 +57,26 @@ class ListDatabasesIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()).maxTime(1000, MILLISECONDS) + expect operation, isTheSameAs(new ListDatabasesOperation(CSOT_NO_TIMEOUT.get(), new DocumentCodec()) .retryReads(true)) readPreference == secondary() when: 'overriding initial options' - listDatabaseIterable.maxTime(999, MILLISECONDS).filter(Document.parse('{a: 1}')).nameOnly(true).iterator() + listDatabaseIterable.maxTime(100, MILLISECONDS).filter(Document.parse('{a: 1}')).nameOnly(true).iterator() operation = executor.getReadOperation() as ListDatabasesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()).maxTime(999, MILLISECONDS) + expect operation, isTheSameAs(new ListDatabasesOperation(CSOT_MAX_TIME.get(), new DocumentCodec()) .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).retryReads(true)) when: 'overriding initial options' - listDatabaseIterable.maxTime(101, MILLISECONDS).filter(Document.parse('{a: 1}')).authorizedDatabasesOnly(true).iterator() + listDatabaseIterable.filter(Document.parse('{a: 1}')).authorizedDatabasesOnly(true).iterator() operation = executor.getReadOperation() as ListDatabasesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListDatabasesOperation(new 
DocumentCodec()).maxTime(101, MILLISECONDS) + expect operation, isTheSameAs(new ListDatabasesOperation(CSOT_MAX_TIME.get(), new DocumentCodec()) .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).authorizedDatabasesOnly(true).retryReads(true)) } @@ -99,7 +101,7 @@ class ListDatabasesIterableSpecification extends Specification { } } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) - def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor) + def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor, true, null) when: def results = mongoIterable.first() @@ -143,7 +145,7 @@ class ListDatabasesIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, - Stub(OperationExecutor)) + Stub(OperationExecutor), true, null) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy index d1090fe1525..a71b9aa3a9d 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy @@ -31,6 +31,8 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -47,8 +49,8 @@ class ListIndexesIterableSpecification extends Specification { def 'should build the expected listIndexesOperation'() { given: def executor = new TestOperationExecutor([null, 
null]) - def listIndexesIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, executor) - .batchSize(100).maxTime(1000, MILLISECONDS) + def listIndexesIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, + executor, true, null).batchSize(100) when: 'default input should be as expected' listIndexesIterable.iterator() @@ -57,20 +59,20 @@ class ListIndexesIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) - .batchSize(100).maxTime(1000, MILLISECONDS).retryReads(true)) + expect operation, isTheSameAs(new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, new DocumentCodec()) + .batchSize(100).retryReads(true)) readPreference == secondary() when: 'overriding initial options' listIndexesIterable.batchSize(99) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .iterator() operation = executor.getReadOperation() as ListIndexesOperation then: 'should use the overrides' - expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) - .batchSize(99).maxTime(999, MILLISECONDS).retryReads(true)) + expect operation, isTheSameAs(new ListIndexesOperation(CSOT_MAX_TIME.get(), namespace, new DocumentCodec()) + .batchSize(99).retryReads(true)) } def 'should use ClientSession'() { @@ -80,7 +82,7 @@ class ListIndexesIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def listIndexesIterable = new ListIndexesIterableImpl(clientSession, namespace, Document, codecRegistry, readPreference, - executor) + executor, true, null) when: listIndexesIterable.first() @@ -120,7 +122,8 @@ class ListIndexesIterableSpecification extends Specification { } } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) - def mongoIterable = new 
ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, executor) + def mongoIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, + executor, true, null) when: def results = mongoIterable.first() @@ -164,7 +167,7 @@ class ListIndexesIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, - Stub(OperationExecutor)) + Stub(OperationExecutor), true, null) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy index 8983c835701..751f2f0385c 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy @@ -42,6 +42,8 @@ import spock.lang.Specification import java.util.function.Consumer +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.secondary import static java.util.concurrent.TimeUnit.MILLISECONDS @@ -62,7 +64,7 @@ class MapReduceIterableSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', null) when: 'default input should be as expected' mapReduceIterable.iterator() @@ -71,8 +73,8 @@ class MapReduceIterableSpecification extends Specification { def readPreference = executor.getReadPreference() then: - expect operation, 
isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, new BsonJavaScript('map'), - new BsonJavaScript('reduce'), new DocumentCodec()) + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(CSOT_NO_TIMEOUT.get(), namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) .verbose(true)) readPreference == secondary() @@ -80,7 +82,7 @@ class MapReduceIterableSpecification extends Specification { mapReduceIterable.filter(new Document('filter', 1)) .finalizeFunction('finalize') .limit(999) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .scope(new Document('scope', 1)) .sort(new Document('sort', 1)) .verbose(false) @@ -90,12 +92,11 @@ class MapReduceIterableSpecification extends Specification { operation = (executor.getReadOperation() as MapReduceIterableImpl.WrappedMapReduceReadOperation).getOperation() then: 'should use the overrides' - expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, new BsonJavaScript('map'), - new BsonJavaScript('reduce'), new DocumentCodec()) + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(CSOT_MAX_TIME.get(), namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) .filter(new BsonDocument('filter', new BsonInt32(1))) .finalizeFunction(new BsonJavaScript('finalize')) .limit(999) - .maxTime(999, MILLISECONDS) .scope(new BsonDocument('scope', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .verbose(false) @@ -109,14 +110,14 @@ class MapReduceIterableSpecification extends Specification { when: 'mapReduce to a collection' def collectionNamespace = new MongoNamespace('dbName', 'collName') - def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, - writeConcern, executor, 'map', 'reduce') + def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, + 
readPreference, readConcern, writeConcern, executor, 'map', 'reduce', null) .collectionName(collectionNamespace.getCollectionName()) .databaseName(collectionNamespace.getDatabaseName()) .filter(new Document('filter', 1)) .finalizeFunction('finalize') .limit(999) - .maxTime(999, MILLISECONDS) + .maxTime(100, MILLISECONDS) .scope(new Document('scope', 1)) .sort(new Document('sort', 1)) .verbose(false) @@ -130,13 +131,12 @@ class MapReduceIterableSpecification extends Specification { mapReduceIterable.iterator() def operation = executor.getWriteOperation() as MapReduceToCollectionOperation - def expectedOperation = new MapReduceToCollectionOperation(namespace, new BsonJavaScript('map'), - new BsonJavaScript('reduce'), 'collName', writeConcern) + def expectedOperation = new MapReduceToCollectionOperation(CSOT_MAX_TIME.get(), namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'collName', writeConcern) .databaseName(collectionNamespace.getDatabaseName()) .filter(new BsonDocument('filter', new BsonInt32(1))) .finalizeFunction(new BsonJavaScript('finalize')) .limit(999) - .maxTime(999, MILLISECONDS) .scope(new BsonDocument('scope', new BsonInt32(1))) .sort(new BsonDocument('sort', new BsonInt32(1))) .verbose(false) @@ -174,7 +174,7 @@ class MapReduceIterableSpecification extends Specification { } def executor = new TestOperationExecutor([batchCursor, batchCursor]) def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', null) when: mapReduceIterable.first() @@ -199,7 +199,7 @@ class MapReduceIterableSpecification extends Specification { } def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null]) def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, - readConcern, 
writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', null) .collectionName('collName') when: @@ -232,7 +232,7 @@ class MapReduceIterableSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) def executor = new TestOperationExecutor([new MongoException('failure')]) def mapReduceIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, - readPreference, readConcern, writeConcern, executor, 'map', 'reduce') + readPreference, readConcern, writeConcern, executor, 'map', 'reduce', null) when: 'The operation fails with an exception' @@ -249,7 +249,7 @@ class MapReduceIterableSpecification extends Specification { when: 'a codec is missing' new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, - 'map', 'reduce').iterator() + 'map', 'reduce', null).iterator() then: thrown(CodecConfigurationException) @@ -278,7 +278,7 @@ class MapReduceIterableSpecification extends Specification { } def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) def mongoIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference, - readConcern, writeConcern, executor, 'map', 'reduce') + readConcern, writeConcern, executor, 'map', 'reduce', null) when: def results = mongoIterable.first() @@ -322,7 +322,7 @@ class MapReduceIterableSpecification extends Specification { when: def batchSize = 5 def mongoIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, - readConcern, writeConcern, Stub(OperationExecutor), 'map', 'reduce') + readConcern, writeConcern, Stub(OperationExecutor), 'map', 'reduce', null) then: mongoIterable.getBatchSize() == null diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy 
b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy index 1a45c682ae6..dbce4f41614 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy @@ -92,6 +92,8 @@ import spock.lang.Specification import java.util.concurrent.TimeUnit +import static com.mongodb.ClusterFixture.CSOT_MAX_TIME +import static com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.primary import static com.mongodb.ReadPreference.secondary @@ -122,7 +124,7 @@ class MongoCollectionSpecification extends Specification { def 'should return the correct name from getName'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, - true, readConcern, JAVA_LEGACY, null, new TestOperationExecutor([null])) + true, readConcern, JAVA_LEGACY, null, null, new TestOperationExecutor([null])) expect: collection.getNamespace() == namespace @@ -135,12 +137,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor).withDocumentClass(newClass) + true, true, readConcern, JAVA_LEGACY, null, null, executor).withDocumentClass(newClass) then: collection.getDocumentClass() == newClass expect collection, isTheSameAs(new MongoCollectionImpl(namespace, newClass, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor)) + true, true, readConcern, JAVA_LEGACY, null, null, executor)) } def 'should behave correctly when using withCodecRegistry'() { @@ -150,12 +152,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, 
Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, C_SHARP_LEGACY, null, executor).withCodecRegistry(newCodecRegistry) + true, true, readConcern, C_SHARP_LEGACY, null, null, executor).withCodecRegistry(newCodecRegistry) then: (collection.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, collection.getCodecRegistry(), readPreference, - ACKNOWLEDGED, true, true, readConcern, C_SHARP_LEGACY, null, executor)) + ACKNOWLEDGED, true, true, readConcern, C_SHARP_LEGACY, null, null, executor)) } def 'should behave correctly when using withReadPreference'() { @@ -165,12 +167,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor).withReadPreference(newReadPreference) + true, true, readConcern, JAVA_LEGACY, null, null, executor).withReadPreference(newReadPreference) then: collection.getReadPreference() == newReadPreference expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, newReadPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor)) + true, true, readConcern, JAVA_LEGACY, null, null, executor)) } def 'should behave correctly when using withWriteConcern'() { @@ -180,12 +182,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor).withWriteConcern(newWriteConcern) + true, true, readConcern, JAVA_LEGACY, null, null, executor).withWriteConcern(newWriteConcern) then: collection.getWriteConcern() == newWriteConcern expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, 
newWriteConcern, - true, true, readConcern, JAVA_LEGACY, null, executor)) + true, true, readConcern, JAVA_LEGACY, null, null, executor)) } def 'should behave correctly when using withReadConcern'() { @@ -195,12 +197,12 @@ class MongoCollectionSpecification extends Specification { when: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor).withReadConcern(newReadConcern) + true, true, readConcern, JAVA_LEGACY, null, null, executor).withReadConcern(newReadConcern) then: collection.getReadConcern() == newReadConcern expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, newReadConcern, JAVA_LEGACY, null, executor)) + true, true, newReadConcern, JAVA_LEGACY, null, null, executor)) } def 'should use CountOperation correctly with documentCount'() { @@ -208,8 +210,9 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([1L, 2L, 3L, 4L]) def filter = new BsonDocument() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, - true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new CountDocumentsOperation(namespace).filter(filter).retryReads(true) + true, readConcern, JAVA_LEGACY, null, null, executor) + def expectedOperation = new CountDocumentsOperation(CSOT_NO_TIMEOUT.get(), namespace) + .filter(filter).retryReads(true) def countMethod = collection.&countDocuments @@ -232,13 +235,12 @@ class MongoCollectionSpecification extends Specification { when: def hint = new BsonDocument('hint', new BsonInt32(1)) - execute(countMethod, session, filter, new CountOptions().hint(hint).skip(10).limit(100) - .maxTime(100, MILLISECONDS).collation(collation)) + execute(countMethod, session, filter, new CountOptions().hint(hint).skip(10).limit(100).collation(collation)) 
operation = executor.getReadOperation() as CountDocumentsOperation then: executor.getClientSession() == session - expect operation, isTheSameAs(expectedOperation.filter(filter).hint(hint).skip(10).limit(100).maxTime(100, MILLISECONDS) + expect operation, isTheSameAs(expectedOperation.filter(filter).hint(hint).skip(10).limit(100) .collation(collation)) where: @@ -249,8 +251,8 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([1L, 2L]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, - true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new EstimatedDocumentCountOperation(namespace) + true, readConcern, JAVA_LEGACY, null, null, executor) + def expectedOperation = new EstimatedDocumentCountOperation(CSOT_NO_TIMEOUT.get(), namespace) .retryReads(true) def countMethod = collection.&estimatedDocumentCount @@ -264,12 +266,13 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = new EstimatedDocumentCountOperation(CSOT_MAX_TIME.get(), namespace).retryReads(true) execute(countMethod, session, new EstimatedDocumentCountOptions().maxTime(100, MILLISECONDS)) operation = executor.getReadOperation() as EstimatedDocumentCountOperation then: executor.getClientSession() == session - expect operation, isTheSameAs(expectedOperation.maxTime(100, MILLISECONDS)) + expect operation, isTheSameAs(expectedOperation) where: session << [null] @@ -279,7 +282,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def filter = new Document('a', 1) def distinctMethod = collection.&distinct 
@@ -288,14 +291,14 @@ class MongoCollectionSpecification extends Specification { then: expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String, - codecRegistry, readPreference, readConcern, executor, 'field', new BsonDocument(), true)) + codecRegistry, readPreference, readConcern, executor, 'field', new BsonDocument(), true, null)) when: distinctIterable = execute(distinctMethod, session, 'field', String).filter(filter) then: expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String, - codecRegistry, readPreference, readConcern, executor, 'field', filter, true)) + codecRegistry, readPreference, readConcern, executor, 'field', filter, true, null)) where: session << [null, Stub(ClientSession)] @@ -305,7 +308,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def findMethod = collection.&find when: @@ -313,28 +316,28 @@ class MongoCollectionSpecification extends Specification { then: expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, Document, codecRegistry, - readPreference, readConcern, executor, new BsonDocument(), true)) + readPreference, readConcern, executor, new BsonDocument(), true, null)) when: findIterable = execute(findMethod, session, BsonDocument) then: expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument, - codecRegistry, readPreference, readConcern, executor, new BsonDocument(), true)) + codecRegistry, readPreference, readConcern, executor, new BsonDocument(), true, null)) when: findIterable = execute(findMethod, session, new Document()) then: expect findIterable, isTheSameAs(new 
FindIterableImpl<>(session, namespace, Document, Document, - codecRegistry, readPreference, readConcern, executor, new Document(), true)) + codecRegistry, readPreference, readConcern, executor, new Document(), true, null)) when: findIterable = execute(findMethod, session, new Document(), BsonDocument) then: expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument, - codecRegistry, readPreference, readConcern, executor, new Document(), true)) + codecRegistry, readPreference, readConcern, executor, new Document(), true, null)) where: session << [null, Stub(ClientSession)] @@ -344,7 +347,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def aggregateMethod = collection.&aggregate when: @@ -353,7 +356,7 @@ class MongoCollectionSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, Document, codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)], - AggregationLevel.COLLECTION, true)) + AggregationLevel.COLLECTION, true, null)) when: aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument) @@ -361,7 +364,7 @@ class MongoCollectionSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, BsonDocument, codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)], - AggregationLevel.COLLECTION, true)) + AggregationLevel.COLLECTION, true, null)) where: session << [null, Stub(ClientSession)] @@ -371,7 +374,7 @@ class MongoCollectionSpecification extends Specification { 
given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) when: collection.aggregate(null) @@ -390,7 +393,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def watchMethod = collection.&watch when: @@ -398,7 +401,7 @@ class MongoCollectionSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, - readPreference, readConcern, executor, [], Document, ChangeStreamLevel.COLLECTION, true), + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.COLLECTION, true, null), ['codec']) when: @@ -407,7 +410,7 @@ class MongoCollectionSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], Document, - ChangeStreamLevel.COLLECTION, true), ['codec']) + ChangeStreamLevel.COLLECTION, true, null), ['codec']) when: changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) @@ -415,7 +418,7 @@ class MongoCollectionSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, - ChangeStreamLevel.COLLECTION, true), ['codec']) + ChangeStreamLevel.COLLECTION, true, null), ['codec']) where: session << [null, 
Stub(ClientSession)] @@ -425,7 +428,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) when: collection.watch((Class) null) @@ -444,7 +447,7 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def mapReduceMethod = collection.&mapReduce when: @@ -452,14 +455,14 @@ class MongoCollectionSpecification extends Specification { then: expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, Document, - codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce')) + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', null)) when: mapReduceIterable = execute(mapReduceMethod, session, 'map', 'reduce', BsonDocument) then: expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, BsonDocument, - codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce')) + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', null)) where: session << [null, Stub(ClientSession)] @@ -471,9 +474,9 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? 
acknowledged(INSERT, 0, 0, [], []) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, BsonDocument, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassValidation, List filters -> - new MixedBulkWriteOperation(namespace, [ + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [ new InsertRequest(BsonDocument.parse('{_id: 1}')), new UpdateRequest(BsonDocument.parse('{a: 2}'), BsonDocument.parse('{a: 200}'), REPLACE) .multi(false).upsert(true).collation(collation).hint(hint).hintString(hintString), @@ -538,7 +541,7 @@ class MongoCollectionSpecification extends Specification { def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) def executor = new TestOperationExecutor([new MongoException('failure')]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) when: collection.bulkWrite(null) @@ -565,9 +568,9 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? 
acknowledged(INSERT, 0, 0, [], []) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def expectedOperation = { WriteConcern wc, Boolean bypassDocumentValidation -> - new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], true, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation) } def insertOneMethod = collection.&insertOne @@ -610,9 +613,9 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassDocumentValidation -> - new MixedBulkWriteOperation(namespace, + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))], ordered, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation) @@ -656,7 +659,7 @@ class MongoCollectionSpecification extends Specification { def 'should validate the insertMany data correctly'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + true, true, readConcern, JAVA_LEGACY, null, null, Stub(OperationExecutor)) when: collection.insertMany(null) @@ -678,7 +681,7 @@ class 
MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? DeleteResult.acknowledged(1) : DeleteResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def deleteOneMethod = collection.&deleteOne when: @@ -687,7 +690,7 @@ class MongoCollectionSpecification extends Specification { then: result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(false)], true, writeConcern, retryWrites)) result == expectedResult @@ -699,7 +702,7 @@ class MongoCollectionSpecification extends Specification { then: result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(false).collation(collation)], true, writeConcern, retryWrites)) result == expectedResult @@ -720,7 +723,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([bulkWriteException]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) when: collection.deleteOne(new Document('_id', 1)) @@ -741,7 +744,7 @@ class MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? 
DeleteResult.acknowledged(1) : DeleteResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def deleteManyMethod = collection.&deleteMany when: @@ -750,7 +753,7 @@ class MongoCollectionSpecification extends Specification { then: result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(true)], true, writeConcern, retryWrites)) result == expectedResult @@ -761,7 +764,7 @@ class MongoCollectionSpecification extends Specification { then: result.wasAcknowledged() == writeConcern.isAcknowledged() - expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, + expect operation, isTheSameAs(new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(true).collation(collation)], true, writeConcern, retryWrites)) result == expectedResult @@ -785,10 +788,10 @@ class MongoCollectionSpecification extends Specification { def expectedResult = writeConcern.isAcknowledged() ? 
UpdateResult.acknowledged(1, modifiedCount, upsertedId) : UpdateResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassValidation, Collation collation -> - new MixedBulkWriteOperation(namespace, + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), REPLACE) .collation(collation).upsert(upsert).hint(hint).hintString(hintString)], true, wc, retryWrites) .bypassDocumentValidation(bypassValidation) @@ -827,7 +830,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([bulkWriteException]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) when: collection.replaceOne(new Document('_id', 1), new Document('_id', 1)) @@ -855,10 +858,10 @@ class MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? 
UpdateResult.acknowledged(1, 0, null) : UpdateResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, List filters, BsonDocument hintDoc, String hintStr -> - new MixedBulkWriteOperation(namespace, + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), UPDATE) .multi(false).upsert(upsert).collation(collation).arrayFilters(filters) .hint(hintDoc).hintString(hintStr)], true, wc, retryWrites) @@ -904,10 +907,10 @@ class MongoCollectionSpecification extends Specification { }) def expectedResult = writeConcern.isAcknowledged() ? UpdateResult.acknowledged(5, 3, null) : UpdateResult.unacknowledged() def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, List filters, BsonDocument hintDoc, String hintStr -> - new MixedBulkWriteOperation(namespace, + new MixedBulkWriteOperation(CSOT_NO_TIMEOUT.get(), namespace, [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), UPDATE) .multi(true).upsert(upsert).collation(collation).arrayFilters(filters) .hint(hintDoc).hintString(hintStr)], true, wc, retryWrites) @@ -948,7 +951,7 @@ class MongoCollectionSpecification extends Specification { def 'should translate MongoBulkWriteException to MongoWriteException'() { given: def collection = new MongoCollectionImpl(namespace, 
Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) when: collection.insertOne(new Document('_id', 1)) @@ -970,7 +973,7 @@ class MongoCollectionSpecification extends Specification { new WriteConcernError(42, 'codeName', 'Message', new BsonDocument()), new ServerAddress(), [] as Set)]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) when: collection.insertOne(new Document('_id', 1)) @@ -986,8 +989,9 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? WriteConcernResult.acknowledged(1, true, null) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) + def expectedOperation = new FindAndDeleteOperation(CSOT_NO_TIMEOUT.get(), namespace, ACKNOWLEDGED, retryWrites, + new DocumentCodec()) .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndDeleteMethod = collection.&findOneAndDelete @@ -999,14 +1003,19 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = new FindAndDeleteOperation(CSOT_MAX_TIME.get(), namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .collation(collation) execute(findOneAndDeleteMethod, session, new Document('a', 1), - new FindOneAndDeleteOptions().projection(new Document('projection', 1)) 
- .maxTime(100, MILLISECONDS).collation(collation)) + new FindOneAndDeleteOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .collation(collation)) operation = executor.getWriteOperation() as FindAndDeleteOperation then: - expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).collation(collation)) + expect operation, isTheSameAs(expectedOperation) where: [writeConcern, session, retryWrites] << [ @@ -1022,9 +1031,10 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? WriteConcernResult.acknowledged(1, true, null) : WriteConcernResult.unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), - new BsonDocument('a', new BsonInt32(10))).filter(new BsonDocument('a', new BsonInt32(1))) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) + def expectedOperation = new FindAndReplaceOperation(CSOT_NO_TIMEOUT.get(), namespace, writeConcern, + retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndReplaceMethod = collection.&findOneAndReplace when: @@ -1035,24 +1045,22 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = new FindAndReplaceOperation(CSOT_MAX_TIME.get(), namespace, writeConcern, + retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .bypassDocumentValidation(false) + .collation(collation) execute(findOneAndReplaceMethod, session, new 
Document('a', 1), new Document('a', 10), - new FindOneAndReplaceOptions().projection(new Document('projection', 1)) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(false)) - operation = executor.getWriteOperation() as FindAndReplaceOperation - - then: - expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(false)) - - when: - execute(findOneAndReplaceMethod, session, new Document('a', 1), new Document('a', 10), - new FindOneAndReplaceOptions().projection(new Document('projection', 1)) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(true).collation(collation)) + new FindOneAndReplaceOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .bypassDocumentValidation(false) + .collation(collation)) operation = executor.getWriteOperation() as FindAndReplaceOperation then: - expect operation, isTheSameAs(expectedOperation.projection(new BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(true).collation(collation)) + expect operation, isTheSameAs(expectedOperation) where: [writeConcern, session, retryWrites] << [ @@ -1068,9 +1076,10 @@ class MongoCollectionSpecification extends Specification { writeConcern.isAcknowledged() ? 
WriteConcernResult.acknowledged(1, true, null) : unacknowledged() }) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, - retryWrites, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, new DocumentCodec(), - new BsonDocument('a', new BsonInt32(10))).filter(new BsonDocument('a', new BsonInt32(1))) + retryWrites, true, readConcern, JAVA_LEGACY, null, null, executor) + def expectedOperation = new FindAndUpdateOperation(CSOT_NO_TIMEOUT.get(), namespace, writeConcern, retryWrites, + new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) def findOneAndUpdateMethod = collection.&findOneAndUpdate when: @@ -1081,15 +1090,25 @@ class MongoCollectionSpecification extends Specification { expect operation, isTheSameAs(expectedOperation) when: + expectedOperation = new FindAndUpdateOperation(CSOT_MAX_TIME.get(), namespace, writeConcern, retryWrites, new DocumentCodec(), + new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation) + .arrayFilters(arrayFilters) + execute(findOneAndUpdateMethod, session, new Document('a', 1), new Document('a', 10), - new FindOneAndUpdateOptions().projection(new Document('projection', 1)).maxTime(100, MILLISECONDS) - .bypassDocumentValidation(bypassDocumentValidation).collation(collation).arrayFilters(arrayFilters)) + new FindOneAndUpdateOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation) + .arrayFilters(arrayFilters)) operation = executor.getWriteOperation() as FindAndUpdateOperation then: - expect operation, isTheSameAs(expectedOperation.projection(new 
BsonDocument('projection', new BsonInt32(1))) - .maxTime(100, MILLISECONDS).bypassDocumentValidation(bypassDocumentValidation).collation(collation) - .arrayFilters(arrayFilters)) + expect operation, isTheSameAs(expectedOperation) where: [writeConcern, arrayFilters, bypassDocumentValidation, session, retryWrites] << [ @@ -1105,8 +1124,8 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new DropCollectionOperation(namespace, ACKNOWLEDGED) + true, true, readConcern, JAVA_LEGACY, null, null, executor) + def expectedOperation = new DropCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, ACKNOWLEDGED) def dropMethod = collection.&drop when: @@ -1125,12 +1144,12 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null, null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def createIndexMethod = collection.&createIndex def createIndexesMethod = collection.&createIndexes when: - def expectedOperation = new CreateIndexesOperation(namespace, + def expectedOperation = new CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1)))], ACKNOWLEDGED) def indexName = execute(createIndexMethod, session, new Document('key', 1)) def operation = executor.getWriteOperation() as CreateIndexesOperation @@ -1140,7 +1159,7 @@ class MongoCollectionSpecification extends Specification { indexName == 'key_1' when: - expectedOperation = new CreateIndexesOperation(namespace, + expectedOperation = new 
CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) def indexNames = execute(createIndexesMethod, session, [new IndexModel(new Document('key', 1)), @@ -1153,10 +1172,12 @@ class MongoCollectionSpecification extends Specification { indexNames == ['key_1', 'key1_1'] when: - expectedOperation = expectedOperation.maxTime(10, MILLISECONDS) + expectedOperation = new CreateIndexesOperation(CSOT_MAX_TIME.get(), namespace, + [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), + new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) indexNames = execute(createIndexesMethod, session, [new IndexModel(new Document('key', 1)), new IndexModel(new Document('key1', 1))], - new CreateIndexOptions().maxTime(10, MILLISECONDS)) + new CreateIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as CreateIndexesOperation then: @@ -1165,7 +1186,7 @@ class MongoCollectionSpecification extends Specification { indexNames == ['key_1', 'key1_1'] when: - expectedOperation = new CreateIndexesOperation(namespace, + expectedOperation = new CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) .commitQuorum(CreateIndexCommitQuorum.VOTING_MEMBERS) @@ -1180,7 +1201,7 @@ class MongoCollectionSpecification extends Specification { indexNames == ['key_1', 'key1_1'] when: - expectedOperation = new CreateIndexesOperation(namespace, + expectedOperation = new CreateIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, [new IndexRequest(new BsonDocument('key', new BsonInt32(1))) .background(true) .unique(true) @@ -1238,7 +1259,7 @@ class MongoCollectionSpecification extends Specification { def 'should validate the createIndexes data correctly'() { given: def collection = 
new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + true, true, readConcern, JAVA_LEGACY, null, null, Stub(OperationExecutor)) when: collection.createIndexes(null) @@ -1258,7 +1279,7 @@ class MongoCollectionSpecification extends Specification { def batchCursor = Stub(BatchCursor) def executor = new TestOperationExecutor([batchCursor, batchCursor, batchCursor]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def listIndexesMethod = collection.&listIndexes when: @@ -1266,7 +1287,7 @@ class MongoCollectionSpecification extends Specification { def operation = executor.getReadOperation() as ListIndexesOperation then: - expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).retryReads(true)) + expect operation, isTheSameAs(new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, new DocumentCodec()).retryReads(true)) executor.getClientSession() == session when: @@ -1275,16 +1296,16 @@ class MongoCollectionSpecification extends Specification { indexes == [] then: - expect operation, isTheSameAs(new ListIndexesOperation(namespace, new BsonDocumentCodec()).retryReads(true)) + expect operation, isTheSameAs(new ListIndexesOperation(CSOT_NO_TIMEOUT.get(), namespace, new BsonDocumentCodec()).retryReads(true)) executor.getClientSession() == session when: - execute(listIndexesMethod, session).batchSize(10).maxTime(10, MILLISECONDS).iterator() + execute(listIndexesMethod, session).batchSize(10).maxTime(100, MILLISECONDS).iterator() operation = executor.getReadOperation() as ListIndexesOperation then: - expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).batchSize(10) - .maxTime(10, MILLISECONDS).retryReads(true)) + expect 
operation, isTheSameAs(new ListIndexesOperation(CSOT_MAX_TIME.get(), namespace, new DocumentCodec()).batchSize(10) + .retryReads(true)) executor.getClientSession() == session where: @@ -1295,11 +1316,11 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def dropIndexMethod = collection.&dropIndex when: - def expectedOperation = new DropIndexOperation(namespace, 'indexName', ACKNOWLEDGED) + def expectedOperation = new DropIndexOperation(CSOT_NO_TIMEOUT.get(), namespace, 'indexName', ACKNOWLEDGED) execute(dropIndexMethod, session, 'indexName') def operation = executor.getWriteOperation() as DropIndexOperation @@ -1309,7 +1330,7 @@ class MongoCollectionSpecification extends Specification { when: def keys = new BsonDocument('x', new BsonInt32(1)) - expectedOperation = new DropIndexOperation(namespace, keys, ACKNOWLEDGED) + expectedOperation = new DropIndexOperation(CSOT_NO_TIMEOUT.get(), namespace, keys, ACKNOWLEDGED) execute(dropIndexMethod, session, keys) operation = executor.getWriteOperation() as DropIndexOperation @@ -1318,8 +1339,8 @@ class MongoCollectionSpecification extends Specification { executor.getClientSession() == session when: - expectedOperation = expectedOperation.maxTime(10, MILLISECONDS) - execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(10, MILLISECONDS)) + expectedOperation = new DropIndexOperation(CSOT_MAX_TIME.get(), namespace, keys, ACKNOWLEDGED) + execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as DropIndexOperation then: @@ -1334,8 +1355,8 @@ class MongoCollectionSpecification extends Specification { given: def executor = new 
TestOperationExecutor([null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) - def expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED) + true, true, readConcern, JAVA_LEGACY, null, null, executor) + def expectedOperation = new DropIndexOperation(CSOT_NO_TIMEOUT.get(), namespace, '*', ACKNOWLEDGED) def dropIndexesMethod = collection.&dropIndexes when: @@ -1347,8 +1368,8 @@ class MongoCollectionSpecification extends Specification { executor.getClientSession() == session when: - expectedOperation = expectedOperation.maxTime(10, MILLISECONDS) - execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(10, MILLISECONDS)) + expectedOperation = new DropIndexOperation(CSOT_MAX_TIME.get(), namespace, '*', ACKNOWLEDGED) + execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(100, MILLISECONDS)) operation = executor.getWriteOperation() as DropIndexOperation then: @@ -1363,10 +1384,10 @@ class MongoCollectionSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null]) def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def newNamespace = new MongoNamespace(namespace.getDatabaseName(), 'newName') def renameCollectionOptions = new RenameCollectionOptions().dropTarget(dropTarget) - def expectedOperation = new RenameCollectionOperation(namespace, newNamespace, ACKNOWLEDGED) + def expectedOperation = new RenameCollectionOperation(CSOT_NO_TIMEOUT.get(), namespace, newNamespace, ACKNOWLEDGED) def renameCollection = collection.&renameCollection when: @@ -1394,7 +1415,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([acknowledged(INSERT, 1, 0, [], [])]) def 
customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry) def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def document = new ImmutableDocument(['a': 1]) when: @@ -1416,7 +1437,7 @@ class MongoCollectionSpecification extends Specification { def executor = new TestOperationExecutor([null]) def customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry) def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, executor) + true, true, readConcern, JAVA_LEGACY, null, null, executor) def document = new ImmutableDocument(['a': 1]) when: @@ -1436,7 +1457,7 @@ class MongoCollectionSpecification extends Specification { def 'should validate the client session correctly'() { given: def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, - true, true, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + true, true, readConcern, JAVA_LEGACY, null, null, Stub(OperationExecutor)) when: collection.aggregate(null, [Document.parse('{$match:{}}')]) diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy index a084bc6bcc0..6bcbbfb8f72 100644 --- a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy @@ -44,6 +44,7 @@ import org.bson.codecs.UuidCodec import org.bson.codecs.ValueCodecProvider import spock.lang.Specification +import static 
com.mongodb.ClusterFixture.CSOT_NO_TIMEOUT import static com.mongodb.CustomMatchers.isTheSameAs import static com.mongodb.ReadPreference.primary import static com.mongodb.ReadPreference.primaryPreferred @@ -66,7 +67,7 @@ class MongoDatabaseSpecification extends Specification { def 'should throw IllegalArgumentException if name is invalid'() { when: new MongoDatabaseImpl('a.b', codecRegistry, readPreference, writeConcern, false, false, readConcern, - JAVA_LEGACY, null, new TestOperationExecutor([])) + JAVA_LEGACY, null, null, new TestOperationExecutor([])) then: thrown(IllegalArgumentException) @@ -75,7 +76,7 @@ class MongoDatabaseSpecification extends Specification { def 'should throw IllegalArgumentException from getCollection if collectionName is invalid'() { given: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern, - JAVA_LEGACY, null, new TestOperationExecutor([])) + JAVA_LEGACY, null, null, new TestOperationExecutor([])) when: database.getCollection('') @@ -87,7 +88,7 @@ class MongoDatabaseSpecification extends Specification { def 'should return the correct name from getName'() { given: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern, - JAVA_LEGACY, null, new TestOperationExecutor([])) + JAVA_LEGACY, null, null, new TestOperationExecutor([])) expect: database.getName() == name @@ -100,13 +101,13 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, true, readConcern, - C_SHARP_LEGACY, null, executor) + C_SHARP_LEGACY, null, null, executor) .withCodecRegistry(newCodecRegistry) then: (database.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY expect database, isTheSameAs(new MongoDatabaseImpl(name, database.getCodecRegistry(), readPreference, writeConcern, - false, true, readConcern, 
C_SHARP_LEGACY, null, executor)) + false, true, readConcern, C_SHARP_LEGACY, null, null, executor)) } def 'should behave correctly when using withReadPreference'() { @@ -116,13 +117,13 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) .withReadPreference(newReadPreference) then: database.getReadPreference() == newReadPreference expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, newReadPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor)) + readConcern, JAVA_LEGACY, null, null, executor)) } def 'should behave correctly when using withWriteConcern'() { @@ -132,13 +133,13 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) .withWriteConcern(newWriteConcern) then: database.getWriteConcern() == newWriteConcern expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, newWriteConcern, false, false, - readConcern, JAVA_LEGACY, null, executor)) + readConcern, JAVA_LEGACY, null, null, executor)) } def 'should behave correctly when using withReadConcern'() { @@ -148,13 +149,13 @@ class MongoDatabaseSpecification extends Specification { when: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) .withReadConcern(newReadConcern) then: database.getReadConcern() == newReadConcern expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - newReadConcern, JAVA_LEGACY, null, executor)) + 
newReadConcern, JAVA_LEGACY, null, null, executor)) } def 'should be able to executeCommand correctly'() { @@ -162,7 +163,7 @@ class MongoDatabaseSpecification extends Specification { def command = new BsonDocument('command', new BsonInt32(1)) def executor = new TestOperationExecutor([null, null, null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) def runCommandMethod = database.&runCommand when: @@ -209,7 +210,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) def dropMethod = database.&drop when: @@ -217,7 +218,7 @@ class MongoDatabaseSpecification extends Specification { def operation = executor.getWriteOperation() as DropDatabaseOperation then: - expect operation, isTheSameAs(new DropDatabaseOperation(name, writeConcern)) + expect operation, isTheSameAs(new DropDatabaseOperation(CSOT_NO_TIMEOUT.get(), name, writeConcern)) executor.getClientSession() == session where: @@ -228,7 +229,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([null, null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) def listCollectionsMethod = database.&listCollections def listCollectionNamesMethod = database.&listCollectionNames @@ -237,14 +238,14 @@ class MongoDatabaseSpecification extends Specification { then: expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false, - Document, codecRegistry, primary(), executor, 
false)) + Document, codecRegistry, primary(), executor, false, null)) when: listCollectionIterable = execute(listCollectionsMethod, session, BsonDocument) then: expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false, - BsonDocument, codecRegistry, primary(), executor, false)) + BsonDocument, codecRegistry, primary(), executor, false, null)) when: def listCollectionNamesIterable = execute(listCollectionNamesMethod, session) @@ -252,7 +253,7 @@ class MongoDatabaseSpecification extends Specification { then: // listCollectionNamesIterable is an instance of a MappingIterable, so have to get the mapped iterable inside it expect listCollectionNamesIterable.getMapped(), isTheSameAs(new ListCollectionsIterableImpl<>(session, name, - true, BsonDocument, codecRegistry, primary(), executor, false)) + true, BsonDocument, codecRegistry, primary(), executor, false, null)) where: session << [null, Stub(ClientSession)] @@ -263,7 +264,7 @@ class MongoDatabaseSpecification extends Specification { def collectionName = 'collectionName' def executor = new TestOperationExecutor([null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) def createCollectionMethod = database.&createCollection when: @@ -271,7 +272,7 @@ class MongoDatabaseSpecification extends Specification { def operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new CreateCollectionOperation(name, collectionName, writeConcern)) + expect operation, isTheSameAs(new CreateCollectionOperation(CSOT_NO_TIMEOUT.get(), name, collectionName, writeConcern)) executor.getClientSession() == session when: @@ -290,7 +291,7 @@ class MongoDatabaseSpecification extends Specification { operation = executor.getWriteOperation() as CreateCollectionOperation then: - expect operation, isTheSameAs(new 
CreateCollectionOperation(name, collectionName, writeConcern) + expect operation, isTheSameAs(new CreateCollectionOperation(CSOT_NO_TIMEOUT.get(), name, collectionName, writeConcern) .collation(collation) .capped(true) .maxDocuments(100) @@ -314,7 +315,7 @@ class MongoDatabaseSpecification extends Specification { def writeConcern = WriteConcern.JOURNALED def executor = new TestOperationExecutor([null, null]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) def createViewMethod = database.&createView when: @@ -322,7 +323,7 @@ class MongoDatabaseSpecification extends Specification { def operation = executor.getWriteOperation() as CreateViewOperation then: - expect operation, isTheSameAs(new CreateViewOperation(name, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(CSOT_NO_TIMEOUT.get(), name, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern)) executor.getClientSession() == session @@ -331,7 +332,7 @@ class MongoDatabaseSpecification extends Specification { operation = executor.getWriteOperation() as CreateViewOperation then: - expect operation, isTheSameAs(new CreateViewOperation(name, viewName, viewOn, + expect operation, isTheSameAs(new CreateViewOperation(CSOT_NO_TIMEOUT.get(), name, viewName, viewOn, [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern).collation(collation)) executor.getClientSession() == session @@ -344,7 +345,7 @@ class MongoDatabaseSpecification extends Specification { def viewName = 'view1' def viewOn = 'col1' def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + readConcern, JAVA_LEGACY, null, null, Stub(OperationExecutor)) when: database.createView(viewName, viewOn, null) @@ -364,7 
+365,7 @@ class MongoDatabaseSpecification extends Specification { def executor = new TestOperationExecutor([]) def namespace = new MongoNamespace(name, 'ignored') def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) def watchMethod = database.&watch when: @@ -372,7 +373,7 @@ class MongoDatabaseSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, - readPreference, readConcern, executor, [], Document, ChangeStreamLevel.DATABASE, false), + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.DATABASE, false, null), ['codec']) when: @@ -381,7 +382,7 @@ class MongoDatabaseSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], Document, - ChangeStreamLevel.DATABASE, false), ['codec']) + ChangeStreamLevel.DATABASE, false, null), ['codec']) when: changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) @@ -389,7 +390,7 @@ class MongoDatabaseSpecification extends Specification { then: expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, - ChangeStreamLevel.DATABASE, false), ['codec']) + ChangeStreamLevel.DATABASE, false, null), ['codec']) where: session << [null, Stub(ClientSession)] @@ -399,7 +400,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, 
null, executor) when: database.watch((Class) null) @@ -418,7 +419,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) def aggregateMethod = database.&aggregate when: @@ -427,7 +428,7 @@ class MongoDatabaseSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, [], AggregationLevel.DATABASE, - false), ['codec']) + false, null), ['codec']) when: aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)]) @@ -435,7 +436,7 @@ class MongoDatabaseSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)], - AggregationLevel.DATABASE, false), ['codec']) + AggregationLevel.DATABASE, false, null), ['codec']) when: aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument) @@ -443,7 +444,7 @@ class MongoDatabaseSpecification extends Specification { then: expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, BsonDocument, codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)], - AggregationLevel.DATABASE, false), ['codec']) + AggregationLevel.DATABASE, false, null), ['codec']) where: session << [null, Stub(ClientSession)] @@ -453,7 +454,7 @@ class MongoDatabaseSpecification extends Specification { given: def executor = new TestOperationExecutor([]) def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, - 
readConcern, JAVA_LEGACY, null, executor) + readConcern, JAVA_LEGACY, null, null, executor) when: database.aggregate(null, []) @@ -478,7 +479,7 @@ class MongoDatabaseSpecification extends Specification { given: def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) def database = new MongoDatabaseImpl('databaseName', codecRegistry, secondary(), WriteConcern.MAJORITY, true, true, - ReadConcern.MAJORITY, JAVA_LEGACY, null, new TestOperationExecutor([])) + ReadConcern.MAJORITY, JAVA_LEGACY, null, null, new TestOperationExecutor([])) when: def collection = database.getCollection('collectionName') @@ -489,14 +490,14 @@ class MongoDatabaseSpecification extends Specification { where: expectedCollection = new MongoCollectionImpl(new MongoNamespace('databaseName', 'collectionName'), Document, fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]), secondary(), - WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, JAVA_LEGACY, null, + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, JAVA_LEGACY, null, null, new TestOperationExecutor([])) } def 'should validate the client session correctly'() { given: def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, - false, readConcern, JAVA_LEGACY, null, Stub(OperationExecutor)) + false, readConcern, JAVA_LEGACY, null, null, Stub(OperationExecutor)) when: database.createCollection(null, 'newColl')