diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index d8fdb8356..4be3ff94b 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -8,10 +8,12 @@ on: branches: - main - '[0-9].*' + - 'feature/*' pull_request: branches: - main - '[0-9].*' + - 'feature/*' schedule: - cron: '0 1 * * *' # nightly build workflow_dispatch: diff --git a/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java b/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java index 0f5d755f4..497786990 100644 --- a/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java +++ b/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java @@ -48,6 +48,11 @@ import io.lettuce.core.protocol.CommandType; import io.lettuce.core.protocol.ProtocolKeyword; import io.lettuce.core.protocol.RedisCommand; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; import io.lettuce.core.vector.RawVector; import io.lettuce.core.vector.VectorMetadata; @@ -82,7 +87,7 @@ public abstract class AbstractRedisAsyncCommands implements RedisAclAsyncC RedisSortedSetAsyncCommands, RedisScriptingAsyncCommands, RedisServerAsyncCommands, RedisHLLAsyncCommands, BaseRedisAsyncCommands, RedisTransactionalAsyncCommands, RedisGeoAsyncCommands, RedisClusterAsyncCommands, RedisJsonAsyncCommands, - RedisVectorSetAsyncCommands { + RedisVectorSetAsyncCommands, RediSearchAsyncCommands { private final StatefulConnection connection; @@ -90,6 +95,8 @@ public abstract class AbstractRedisAsyncCommands implements RedisAclAsyncC private final RedisJsonCommandBuilder jsonCommandBuilder; + private final RediSearchCommandBuilder searchCommandBuilder; + private final RedisVectorSetCommandBuilder vectorSetCommandBuilder; private final Supplier parser; @@ -108,6 +115,7 @@ public AbstractRedisAsyncCommands(StatefulConnection connection, RedisCode this.commandBuilder = new RedisCommandBuilder<>(codec); this.jsonCommandBuilder = new RedisJsonCommandBuilder<>(codec, parser); this.vectorSetCommandBuilder = new RedisVectorSetCommandBuilder<>(codec, parser); + this.searchCommandBuilder = new RediSearchCommandBuilder<>(codec); } /** @@ -1520,6 +1528,61 @@ public boolean isOpen() { return connection.isOpen(); } + @Override + public RedisFuture ftCreate(K index, CreateArgs options, List> fieldArgs) { + return dispatch(searchCommandBuilder.ftCreate(index, options, fieldArgs)); + } + + @Override + public RedisFuture ftCreate(K index, List> fieldArgs) { + return dispatch(searchCommandBuilder.ftCreate(index, null, fieldArgs)); + } + + @Override + public RedisFuture ftDropindex(K index, boolean deleteDocumentKeys) { + return dispatch(searchCommandBuilder.ftDropindex(index, deleteDocumentKeys)); + } + + @Override + public RedisFuture ftDropindex(K index) { + return dispatch(searchCommandBuilder.ftDropindex(index, false)); + } + + @Override + public RedisFuture> ftSearch(K index, V query, SearchArgs args) { + return dispatch(searchCommandBuilder.ftSearch(index, query, args)); + } + + @Override + public RedisFuture> ftSearch(K index, V query) { + return dispatch(searchCommandBuilder.ftSearch(index, query, SearchArgs. builder().build())); + } + + @Override + public RedisFuture> ftAggregate(K index, V query, AggregateArgs args) { + return dispatch(searchCommandBuilder.ftAggregate(index, query, args)); + } + + @Override + public RedisFuture> ftAggregate(K index, V query) { + return dispatch(searchCommandBuilder.ftAggregate(index, query, null)); + } + + @Override + public RedisFuture> ftCursorread(K index, long cursorId, int count) { + return dispatch(searchCommandBuilder.ftCursorread(index, cursorId, count)); + } + + @Override + public RedisFuture> ftCursorread(K index, long cursorId) { + return dispatch(searchCommandBuilder.ftCursorread(index, cursorId)); + } + + @Override + public RedisFuture ftCursordel(K index, long cursorId) { + return dispatch(searchCommandBuilder.ftCursordel(index, cursorId)); + } + @Override public RedisFuture> jsonArrappend(K key, JsonPath jsonPath, JsonValue... values) { return dispatch(jsonCommandBuilder.jsonArrappend(key, jsonPath, values)); diff --git a/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java b/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java index 779d7c9b1..a6e838a07 100644 --- a/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java +++ b/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java @@ -49,6 +49,11 @@ import io.lettuce.core.protocol.RedisCommand; import io.lettuce.core.protocol.TracedCommand; import io.lettuce.core.resource.ClientResources; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; import io.lettuce.core.tracing.TraceContext; import io.lettuce.core.tracing.TraceContextProvider; import io.lettuce.core.tracing.Tracing; @@ -86,12 +91,13 @@ * @author Tihomir Mateev * @since 4.0 */ -public abstract class AbstractRedisReactiveCommands implements RedisAclReactiveCommands, - RedisHashReactiveCommands, RedisKeyReactiveCommands, RedisStringReactiveCommands, - RedisListReactiveCommands, RedisSetReactiveCommands, RedisSortedSetReactiveCommands, - RedisScriptingReactiveCommands, RedisServerReactiveCommands, RedisHLLReactiveCommands, - BaseRedisReactiveCommands, RedisTransactionalReactiveCommands, RedisGeoReactiveCommands, - RedisClusterReactiveCommands, RedisJsonReactiveCommands, RedisVectorSetReactiveCommands { +public abstract class AbstractRedisReactiveCommands + implements RedisAclReactiveCommands, RedisHashReactiveCommands, RedisKeyReactiveCommands, + RedisStringReactiveCommands, RedisListReactiveCommands, RedisSetReactiveCommands, + RedisSortedSetReactiveCommands, RedisScriptingReactiveCommands, RedisServerReactiveCommands, + RedisHLLReactiveCommands, BaseRedisReactiveCommands, RedisTransactionalReactiveCommands, + RedisGeoReactiveCommands, RedisClusterReactiveCommands, RedisJsonReactiveCommands, + RedisVectorSetReactiveCommands, RediSearchReactiveCommands { private final StatefulConnection connection; @@ -99,6 +105,8 @@ public abstract class AbstractRedisReactiveCommands implements RedisAclRea private final RedisJsonCommandBuilder jsonCommandBuilder; + private final RediSearchCommandBuilder searchCommandBuilder; + private final RedisVectorSetCommandBuilder vectorSetCommandBuilder; private final Supplier parser; @@ -123,6 +131,7 @@ public AbstractRedisReactiveCommands(StatefulConnection connection, RedisC this.commandBuilder = new RedisCommandBuilder<>(codec); this.jsonCommandBuilder = new RedisJsonCommandBuilder<>(codec, parser); this.vectorSetCommandBuilder = new RedisVectorSetCommandBuilder<>(codec, parser); + this.searchCommandBuilder = new RediSearchCommandBuilder<>(codec); this.clientResources = connection.getResources(); this.tracingEnabled = clientResources.tracing().isEnabled(); } @@ -1584,6 +1593,61 @@ public boolean isOpen() { return connection.isOpen(); } + @Override + public Mono ftCreate(K index, CreateArgs options, List> fieldArgs) { + return createMono(() -> searchCommandBuilder.ftCreate(index, options, fieldArgs)); + } + + @Override + public Mono ftCreate(K index, List> fieldArgs) { + return createMono(() -> searchCommandBuilder.ftCreate(index, null, fieldArgs)); + } + + @Override + public Mono ftCursordel(K index, long cursorId) { + return createMono(() -> searchCommandBuilder.ftCursordel(index, cursorId)); + } + + @Override + public Mono ftDropindex(K index, boolean deleteDocumentKeys) { + return createMono(() -> searchCommandBuilder.ftDropindex(index, deleteDocumentKeys)); + } + + @Override + public Mono ftDropindex(K index) { + return createMono(() -> searchCommandBuilder.ftDropindex(index, false)); + } + + @Override + public Mono> ftSearch(K index, V query, SearchArgs args) { + return createMono(() -> searchCommandBuilder.ftSearch(index, query, args)); + } + + @Override + public Mono> ftSearch(K index, V query) { + return createMono(() -> searchCommandBuilder.ftSearch(index, query, SearchArgs. builder().build())); + } + + @Override + public Mono> ftAggregate(K index, V query, AggregateArgs args) { + return createMono(() -> searchCommandBuilder.ftAggregate(index, query, args)); + } + + @Override + public Mono> ftAggregate(K index, V query) { + return createMono(() -> searchCommandBuilder.ftAggregate(index, query, null)); + } + + @Override + public Mono> ftCursorread(K index, long cursorId, int count) { + return createMono(() -> searchCommandBuilder.ftCursorread(index, cursorId, count)); + } + + @Override + public Mono> ftCursorread(K index, long cursorId) { + return createMono(() -> searchCommandBuilder.ftCursorread(index, cursorId)); + } + @Override public Flux jsonArrappend(K key, JsonPath jsonPath, JsonValue... values) { return createDissolvingFlux(() -> jsonCommandBuilder.jsonArrappend(key, jsonPath, values)); diff --git a/src/main/java/io/lettuce/core/RediSearchCommandBuilder.java b/src/main/java/io/lettuce/core/RediSearchCommandBuilder.java new file mode 100644 index 000000000..debc04ad9 --- /dev/null +++ b/src/main/java/io/lettuce/core/RediSearchCommandBuilder.java @@ -0,0 +1,182 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core; + +import io.lettuce.core.codec.RedisCodec; +import io.lettuce.core.output.EncodedComplexOutput; +import io.lettuce.core.output.StatusOutput; +import io.lettuce.core.protocol.BaseRedisCommandBuilder; +import io.lettuce.core.protocol.Command; +import io.lettuce.core.protocol.CommandArgs; +import io.lettuce.core.protocol.CommandKeyword; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.SearchReplyParser; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; + +import java.util.List; + +import static io.lettuce.core.protocol.CommandType.*; + +/** + * Command builder for RediSearch commands. + * + * @param Key type. + * @param Value type. + * @since 6.8 + */ +class RediSearchCommandBuilder extends BaseRedisCommandBuilder { + + RediSearchCommandBuilder(RedisCodec codec) { + super(codec); + } + + /** + * Create a new index with the given name, index options and fieldArgs. + * + * @param index the index name + * @param createArgs the index options + * @param fieldArgs the fieldArgs + * @return the result of the create command + */ + public Command ftCreate(K index, CreateArgs createArgs, List> fieldArgs) { + notNullKey(index); + notEmpty(fieldArgs.toArray()); + + CommandArgs args = new CommandArgs<>(codec).addKey(index); + + if (createArgs != null) { + createArgs.build(args); + } + + args.add(CommandKeyword.SCHEMA); + + for (FieldArgs arg : fieldArgs) { + arg.build(args); + } + + return createCommand(FT_CREATE, new StatusOutput<>(codec), args); + + } + + /** + * Search the index with the given name using the specified query and search arguments. + * + * @param index the index name + * @param query the query + * @param searchArgs the search arguments + * @return the result of the search command + */ + public Command> ftSearch(K index, V query, SearchArgs searchArgs) { + notNullKey(index); + notNullKey(query); + + CommandArgs args = new CommandArgs<>(codec).addKey(index); + args.addValue(query); + + if (searchArgs != null) { + searchArgs.build(args); + } + + return createCommand(FT_SEARCH, new EncodedComplexOutput<>(codec, new SearchReplyParser<>(codec, searchArgs)), args); + } + + /** + * Run a search query on an index and perform aggregate transformations on the results. + * + * @param index the index name + * @param query the query + * @param aggregateArgs the aggregate arguments + * @return the result of the aggregate command + */ + public Command> ftAggregate(K index, V query, AggregateArgs aggregateArgs) { + notNullKey(index); + notNullKey(query); + + CommandArgs args = new CommandArgs<>(codec).addKey(index); + args.addValue(query); + + if (aggregateArgs != null) { + aggregateArgs.build(args); + } + + return createCommand(FT_AGGREGATE, new EncodedComplexOutput<>(codec, new SearchReplyParser<>(codec, null)), args); + } + + /** + * Read next results from an existing cursor. + * + * @param index the index name + * @param cursorId the cursor id + * @param count the number of results to read + * @return the result of the cursor read command + */ + public Command> ftCursorread(K index, long cursorId, int count) { + notNullKey(index); + + CommandArgs args = new CommandArgs<>(codec).add(CommandKeyword.READ).addKey(index); + args.add(cursorId); + args.add(CommandKeyword.COUNT); + args.add(count); + + return createCommand(FT_CURSOR, new EncodedComplexOutput<>(codec, new SearchReplyParser<>(codec, null)), args); + } + + /** + * Read next results from an existing cursor. + * + * @param index the index name + * @param cursorId the cursor id + * @return the result of the cursor read command + */ + public Command> ftCursorread(K index, long cursorId) { + notNullKey(index); + + CommandArgs args = new CommandArgs<>(codec).add(CommandKeyword.READ).addKey(index); + args.add(cursorId); + + return createCommand(FT_CURSOR, new EncodedComplexOutput<>(codec, new SearchReplyParser<>(codec, null)), args); + } + + /** + * Delete a cursor. + * + * @param index the index name + * @param cursorId the cursor id + * @return the result of the cursor delete command + */ + public Command ftCursordel(K index, long cursorId) { + notNullKey(index); + + CommandArgs args = new CommandArgs<>(codec).add(CommandKeyword.DEL).addKey(index); + args.add(cursorId); + + return createCommand(FT_CURSOR, new StatusOutput<>(codec), args); + } + + /** + * Drop the index with the given name. + * + * @param index the index name + * @param deleteDocumentKeys whether to delete the document keys + * @return the result of the drop command + */ + public Command ftDropindex(K index, boolean deleteDocumentKeys) { + notNullKey(index); + + CommandArgs args = new CommandArgs<>(codec).addKey(index); + + if (deleteDocumentKeys) { + args.add(CommandKeyword.DD); + } + + return createCommand(FT_DROPINDEX, new StatusOutput<>(codec), args); + } + +} diff --git a/src/main/java/io/lettuce/core/RedisCommandBuilder.java b/src/main/java/io/lettuce/core/RedisCommandBuilder.java index 6bcf4fa1b..b6b764e14 100644 --- a/src/main/java/io/lettuce/core/RedisCommandBuilder.java +++ b/src/main/java/io/lettuce/core/RedisCommandBuilder.java @@ -46,6 +46,7 @@ import static io.lettuce.core.protocol.CommandKeyword.*; import static io.lettuce.core.protocol.CommandType.*; import static io.lettuce.core.protocol.CommandType.COPY; +import static io.lettuce.core.protocol.CommandType.DEL; import static io.lettuce.core.protocol.CommandType.SAVE; /** diff --git a/src/main/java/io/lettuce/core/api/async/RediSearchAsyncCommands.java b/src/main/java/io/lettuce/core/api/async/RediSearchAsyncCommands.java new file mode 100644 index 000000000..7473f8116 --- /dev/null +++ b/src/main/java/io/lettuce/core/api/async/RediSearchAsyncCommands.java @@ -0,0 +1,442 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.api.async; + +import java.util.List; +import io.lettuce.core.RedisFuture; +import io.lettuce.core.annotations.Experimental; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; + +/** + * Asynchronous executed commands for RediSearch functionality + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @see RediSearch + * @since 6.8 + * @generated by io.lettuce.apigenerator.CreateAsyncApi + */ +public interface RediSearchAsyncCommands { + + /** + * Create a new search index with the given name and field definitions using default settings. + * + *

+ * This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis + * data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other + * configuration options. + *

+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, CreateArgs, List) + * @see #ftDropindex(Object) + */ + @Experimental + RedisFuture ftCreate(K index, List> fieldArgs); + + /** + * Create a new search index with the given name, custom configuration, and field definitions. + * + *

+ * This command creates a new search index with advanced configuration options that control how the index behaves, what data + * it indexes, and how it processes documents. This variant provides full control over index creation parameters. + *

+ * + *

+ * The {@link CreateArgs} parameter allows you to specify: + *

+ *
    + *
  • Data type: HASH (default) or JSON documents
  • + *
  • Key prefixes: Which keys to index based on prefix patterns
  • + *
  • Filters: Conditional indexing based on field values
  • + *
  • Language settings: Default language and language field for stemming
  • + *
  • Performance options: NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization
  • + *
  • Temporary indexes: Auto-expiring indexes for short-term use
  • + *
+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param arguments the index {@link CreateArgs} containing configuration options + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @since 6.8 + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, List) + * @see #ftDropindex(Object) + */ + @Experimental + RedisFuture ftCreate(K index, CreateArgs arguments, List> fieldArgs); + + /** + * Drop a search index without deleting the associated documents. + * + *

+ * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or + * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without + * losing data. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object, boolean) + * @see #ftCreate(Object, List) + */ + @Experimental + RedisFuture ftDropindex(K index); + + /** + * Drop a search index with optional document deletion. + * + *

+ * This command removes the search index and optionally deletes all associated documents. When {@code deleteDocuments} is + * {@code true}, this operation becomes destructive and will permanently remove both the index and all indexed documents + * from Redis. + *

+ * + *

+ * Asynchronous Behavior: If an index creation is still running ({@link #ftCreate(Object, List)} is running + * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for + * indexing but not yet processed will remain in the database. + *

+ * + *

+ * Time complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param deleteDocuments if {@code true}, delete the indexed documents as well; if {@code false}, preserve documents + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object) + * @see #ftCreate(Object, List) + */ + @Experimental + RedisFuture ftDropindex(K index, boolean deleteDocuments); + + /** + * Search the index with a textual query using default search options. + * + *

+ * This command performs a full-text search on the specified index using the provided query string. It returns matching + * documents with their content and metadata. This is the basic search variant that uses default search behavior without + * additional filtering, sorting, or result customization. + *

+ * + *

+ * The query follows RediSearch query syntax, supporting: + *

+ *
    + *
  • Simple text search: {@code "hello world"} - searches for documents containing both terms
  • + *
  • Field-specific search: {@code "@title:redis"} - searches within specific fields
  • + *
  • Boolean operators: {@code "redis AND search"} or {@code "redis | search"}
  • + *
  • Phrase search: {@code "\"exact phrase\""} - searches for exact phrase matches
  • + *
  • Wildcard search: {@code "redi*"} - prefix matching
  • + *
  • Numeric ranges: {@code "@price:[100 200]"} - numeric field filtering
  • + *
  • Geographic search: {@code "@location:[lon lat radius unit]"} - geo-spatial queries
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @return the result of the search command containing matching documents, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object, SearchArgs) + */ + @Experimental + RedisFuture> ftSearch(K index, V query); + + /** + * Search the index with a textual query using advanced search options and filters. + * + *

+ * This command performs a full-text search on the specified index with advanced configuration options provided through + * {@link SearchArgs}. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting, + * and pagination. + *

+ * + *

+ * The {@link SearchArgs} parameter enables you to specify: + *

+ *
    + *
  • Result options: NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS
  • + *
  • Query behavior: VERBATIM (no stemming), NOSTOPWORDS
  • + *
  • Filtering: Numeric filters, geo filters, field filters
  • + *
  • Result customization: RETURN specific fields, SUMMARIZE, HIGHLIGHT
  • + *
  • Sorting and pagination: SORTBY, LIMIT offset and count
  • + *
  • Performance options: TIMEOUT, SLOP, INORDER
  • + *
  • Language and scoring: LANGUAGE, SCORER, EXPLAINSCORE
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use NOCONTENT when you only need document IDs
  • + *
  • Specify RETURN fields to limit data transfer
  • + *
  • Use SORTABLE fields for efficient sorting
  • + *
  • Apply filters to reduce result set size
  • + *
  • Use LIMIT for pagination to avoid large result sets
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set. Complexity varies based on + * query type, filters, and sorting requirements. + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @param args the search arguments containing advanced options and filters + * @return the result of the search command containing matching documents and metadata, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see Advanced concepts + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object) + */ + @Experimental + RedisFuture> ftSearch(K index, V query, SearchArgs args); + + /** + * Run a search query on an index and perform basic aggregate transformations using default options. + * + *

+ * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike + * {@link #ftSearch(Object, Object)}, which returns individual documents, FT.AGGREGATE processes the result set through a + * pipeline of transformations to produce analytical insights, summaries, and computed values. + *

+ * + *

+ * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations + * with grouping, sorting, filtering, and custom transformations, use {@link #ftAggregate(Object, Object, AggregateArgs)}. + *

+ * + *

+ * Common use cases for aggregations include: + *

+ *
    + *
  • Analytics: Count documents, calculate averages, find min/max values
  • + *
  • Reporting: Group data by categories, time periods, or geographic regions
  • + *
  • Data transformation: Apply mathematical functions, format dates, extract values
  • + *
  • Performance optimization: Process large datasets server-side instead of client-side
  • + *
+ * + *

+ * Time complexity: O(1) base complexity, but depends on the query and number of results processed + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @return the result of the aggregate command containing processed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + RedisFuture> ftAggregate(K index, V query); + + /** + * Run a search query on an index and perform advanced aggregate transformations with a processing pipeline. + * + *

+ * This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and + * analyze the results. The {@link AggregateArgs} parameter defines a series of operations that process the data + * server-side, enabling powerful analytics and data transformation capabilities directly within Redis. + *

+ * + *

+ * The aggregation pipeline supports the following operations: + *

+ *
    + *
  • LOAD: Load specific document attributes for processing
  • + *
  • GROUPBY: Group results by one or more properties
  • + *
  • REDUCE: Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)
  • + *
  • SORTBY: Sort results by specified properties
  • + *
  • APPLY: Apply mathematical expressions and transformations
  • + *
  • FILTER: Filter results based on computed values
  • + *
  • LIMIT: Paginate results efficiently
  • + *
  • WITHCURSOR: Enable cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use SORTABLE fields for efficient grouping and sorting operations
  • + *
  • Apply filters early in the pipeline to reduce processing overhead
  • + *
  • Use WITHCURSOR for large result sets to avoid memory issues
  • + *
  • Load only necessary attributes to minimize data transfer
  • + *
  • Consider using LIMIT to restrict result set size
  • + *
+ * + *

+ * Time complexity: Non-deterministic, depends on the query and aggregation operations performed. Generally + * linear to the number of results processed through the pipeline. + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @param args the aggregate arguments defining the processing pipeline and operations + * @return the result of the aggregate command containing processed and transformed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see Cursor + * API + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object) + * @see #ftCursorread(Object, long) + */ + @Experimental + RedisFuture> ftAggregate(K index, V query, AggregateArgs args); + + /** + * Read next results from an existing cursor. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. Cursors provide an efficient way + * to iterate through large result sets without loading all results into memory at once. + *

+ * + *

+ * The {@code count} parameter overrides the {@code COUNT} value specified in the original {@code FT.AGGREGATE} command, + * allowing you to control the batch size for this specific read operation. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @param count the number of results to read. This parameter overrides the {@code COUNT} specified in {@code FT.AGGREGATE} + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + RedisFuture> ftCursorread(K index, long cursorId, int count); + + /** + * Read next results from an existing cursor using the default batch size. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. This variant uses the default + * batch size that was specified in the original {@code FT.AGGREGATE} command's {@code WITHCURSOR} clause. + *

+ * + *

+ * Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once. + * When the cursor is exhausted (no more results), the returned {@link SearchReply} will have a cursor id of 0. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + RedisFuture> ftCursorread(K index, long cursorId); + + /** + * Delete a cursor and free its associated resources. + * + *

+ * This command is used to explicitly delete a cursor created by {@link #ftAggregate(Object, Object, AggregateArgs)} with + * the {@code WITHCURSOR} option. Deleting a cursor frees up server resources and should be done when you no longer need to + * read more results from the cursor. + *

+ * + *

+ * Important: Cursors have a default timeout and will be automatically deleted by Redis if not accessed + * within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to + * free up resources immediately. + *

+ * + *

+ * Once a cursor is deleted, any subsequent attempts to read from it using {@link #ftCursorread(Object, long)} or + * {@link #ftCursorread(Object, long, int)} will result in an error. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return {@code "OK"} if the cursor was successfully deleted + * @since 6.8 + * @see FT.CURSOR DEL + * @see Cursor + * API + * @see #ftAggregate(Object, Object, AggregateArgs) + * @see #ftCursorread(Object, long) + * @see #ftCursorread(Object, long, int) + */ + @Experimental + RedisFuture ftCursordel(K index, long cursorId); + +} diff --git a/src/main/java/io/lettuce/core/api/async/RedisAsyncCommands.java b/src/main/java/io/lettuce/core/api/async/RedisAsyncCommands.java index 80e30c227..30a3c8d8f 100644 --- a/src/main/java/io/lettuce/core/api/async/RedisAsyncCommands.java +++ b/src/main/java/io/lettuce/core/api/async/RedisAsyncCommands.java @@ -38,7 +38,8 @@ public interface RedisAsyncCommands extends BaseRedisAsyncCommands, RedisHashAsyncCommands, RedisHLLAsyncCommands, RedisKeyAsyncCommands, RedisListAsyncCommands, RedisScriptingAsyncCommands, RedisServerAsyncCommands, RedisSetAsyncCommands, RedisSortedSetAsyncCommands, RedisStreamAsyncCommands, RedisStringAsyncCommands, - RedisTransactionalAsyncCommands, RedisJsonAsyncCommands, RedisVectorSetAsyncCommands { + RedisTransactionalAsyncCommands, RedisJsonAsyncCommands, RedisVectorSetAsyncCommands, + RediSearchAsyncCommands { /** * Authenticate to the server. diff --git a/src/main/java/io/lettuce/core/api/reactive/RediSearchReactiveCommands.java b/src/main/java/io/lettuce/core/api/reactive/RediSearchReactiveCommands.java new file mode 100644 index 000000000..e651fab0f --- /dev/null +++ b/src/main/java/io/lettuce/core/api/reactive/RediSearchReactiveCommands.java @@ -0,0 +1,443 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.api.reactive; + +import java.util.List; + +import io.lettuce.core.annotations.Experimental; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; +import reactor.core.publisher.Mono; + +/** + * Reactive executed commands for RediSearch functionality + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @see RediSearch + * @since 6.8 + * @generated by io.lettuce.apigenerator.CreateReactiveApi + */ +public interface RediSearchReactiveCommands { + + /** + * Create a new search index with the given name and field definitions using default settings. + * + *

+ * This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis + * data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other + * configuration options. + *

+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, CreateArgs, List) + * @see #ftDropindex(Object) + */ + @Experimental + Mono ftCreate(K index, List> fieldArgs); + + /** + * Create a new search index with the given name, custom configuration, and field definitions. + * + *

+ * This command creates a new search index with advanced configuration options that control how the index behaves, what data + * it indexes, and how it processes documents. This variant provides full control over index creation parameters. + *

+ * + *

+ * The {@link CreateArgs} parameter allows you to specify: + *

+ *
    + *
  • Data type: HASH (default) or JSON documents
  • + *
  • Key prefixes: Which keys to index based on prefix patterns
  • + *
  • Filters: Conditional indexing based on field values
  • + *
  • Language settings: Default language and language field for stemming
  • + *
  • Performance options: NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization
  • + *
  • Temporary indexes: Auto-expiring indexes for short-term use
  • + *
+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param arguments the index {@link CreateArgs} containing configuration options + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @since 6.8 + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, List) + * @see #ftDropindex(Object) + */ + @Experimental + Mono ftCreate(K index, CreateArgs arguments, List> fieldArgs); + + /** + * Drop a search index without deleting the associated documents. + * + *

+ * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or + * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without + * losing data. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object, boolean) + * @see #ftCreate(Object, List) + */ + @Experimental + Mono ftDropindex(K index); + + /** + * Drop a search index with optional document deletion. + * + *

+ * This command removes the search index and optionally deletes all associated documents. When {@code deleteDocuments} is + * {@code true}, this operation becomes destructive and will permanently remove both the index and all indexed documents + * from Redis. + *

+ * + *

+ * Asynchronous Behavior: If an index creation is still running ({@link #ftCreate(Object, List)} is running + * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for + * indexing but not yet processed will remain in the database. + *

+ * + *

+ * Time complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param deleteDocuments if {@code true}, delete the indexed documents as well; if {@code false}, preserve documents + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object) + * @see #ftCreate(Object, List) + */ + @Experimental + Mono ftDropindex(K index, boolean deleteDocuments); + + /** + * Search the index with a textual query using default search options. + * + *

+ * This command performs a full-text search on the specified index using the provided query string. It returns matching + * documents with their content and metadata. This is the basic search variant that uses default search behavior without + * additional filtering, sorting, or result customization. + *

+ * + *

+ * The query follows RediSearch query syntax, supporting: + *

+ *
    + *
  • Simple text search: {@code "hello world"} - searches for documents containing both terms
  • + *
  • Field-specific search: {@code "@title:redis"} - searches within specific fields
  • + *
  • Boolean operators: {@code "redis AND search"} or {@code "redis | search"}
  • + *
  • Phrase search: {@code "\"exact phrase\""} - searches for exact phrase matches
  • + *
  • Wildcard search: {@code "redi*"} - prefix matching
  • + *
  • Numeric ranges: {@code "@price:[100 200]"} - numeric field filtering
  • + *
  • Geographic search: {@code "@location:[lon lat radius unit]"} - geo-spatial queries
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @return the result of the search command containing matching documents, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object, SearchArgs) + */ + @Experimental + Mono> ftSearch(K index, V query); + + /** + * Search the index with a textual query using advanced search options and filters. + * + *

+ * This command performs a full-text search on the specified index with advanced configuration options provided through + * {@link SearchArgs}. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting, + * and pagination. + *

+ * + *

+ * The {@link SearchArgs} parameter enables you to specify: + *

+ *
    + *
  • Result options: NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS
  • + *
  • Query behavior: VERBATIM (no stemming), NOSTOPWORDS
  • + *
  • Filtering: Numeric filters, geo filters, field filters
  • + *
  • Result customization: RETURN specific fields, SUMMARIZE, HIGHLIGHT
  • + *
  • Sorting and pagination: SORTBY, LIMIT offset and count
  • + *
  • Performance options: TIMEOUT, SLOP, INORDER
  • + *
  • Language and scoring: LANGUAGE, SCORER, EXPLAINSCORE
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use NOCONTENT when you only need document IDs
  • + *
  • Specify RETURN fields to limit data transfer
  • + *
  • Use SORTABLE fields for efficient sorting
  • + *
  • Apply filters to reduce result set size
  • + *
  • Use LIMIT for pagination to avoid large result sets
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set. Complexity varies based on + * query type, filters, and sorting requirements. + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @param args the search arguments containing advanced options and filters + * @return the result of the search command containing matching documents and metadata, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see Advanced concepts + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object) + */ + @Experimental + Mono> ftSearch(K index, V query, SearchArgs args); + + /** + * Run a search query on an index and perform basic aggregate transformations using default options. + * + *

+ * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike + * {@link #ftSearch(Object, Object)}, which returns individual documents, FT.AGGREGATE processes the result set through a + * pipeline of transformations to produce analytical insights, summaries, and computed values. + *

+ * + *

+ * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations + * with grouping, sorting, filtering, and custom transformations, use {@link #ftAggregate(Object, Object, AggregateArgs)}. + *

+ * + *

+ * Common use cases for aggregations include: + *

+ *
    + *
  • Analytics: Count documents, calculate averages, find min/max values
  • + *
  • Reporting: Group data by categories, time periods, or geographic regions
  • + *
  • Data transformation: Apply mathematical functions, format dates, extract values
  • + *
  • Performance optimization: Process large datasets server-side instead of client-side
  • + *
+ * + *

+ * Time complexity: O(1) base complexity, but depends on the query and number of results processed + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @return the result of the aggregate command containing processed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + Mono> ftAggregate(K index, V query); + + /** + * Run a search query on an index and perform advanced aggregate transformations with a processing pipeline. + * + *

+ * This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and + * analyze the results. The {@link AggregateArgs} parameter defines a series of operations that process the data + * server-side, enabling powerful analytics and data transformation capabilities directly within Redis. + *

+ * + *

+ * The aggregation pipeline supports the following operations: + *

+ *
    + *
  • LOAD: Load specific document attributes for processing
  • + *
  • GROUPBY: Group results by one or more properties
  • + *
  • REDUCE: Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)
  • + *
  • SORTBY: Sort results by specified properties
  • + *
  • APPLY: Apply mathematical expressions and transformations
  • + *
  • FILTER: Filter results based on computed values
  • + *
  • LIMIT: Paginate results efficiently
  • + *
  • WITHCURSOR: Enable cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use SORTABLE fields for efficient grouping and sorting operations
  • + *
  • Apply filters early in the pipeline to reduce processing overhead
  • + *
  • Use WITHCURSOR for large result sets to avoid memory issues
  • + *
  • Load only necessary attributes to minimize data transfer
  • + *
  • Consider using LIMIT to restrict result set size
  • + *
+ * + *

+ * Time complexity: Non-deterministic, depends on the query and aggregation operations performed. Generally + * linear to the number of results processed through the pipeline. + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @param args the aggregate arguments defining the processing pipeline and operations + * @return the result of the aggregate command containing processed and transformed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see Cursor + * API + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object) + * @see #ftCursorread(Object, long) + */ + @Experimental + Mono> ftAggregate(K index, V query, AggregateArgs args); + + /** + * Read next results from an existing cursor. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. Cursors provide an efficient way + * to iterate through large result sets without loading all results into memory at once. + *

+ * + *

+ * The {@code count} parameter overrides the {@code COUNT} value specified in the original {@code FT.AGGREGATE} command, + * allowing you to control the batch size for this specific read operation. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @param count the number of results to read. This parameter overrides the {@code COUNT} specified in {@code FT.AGGREGATE} + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + Mono> ftCursorread(K index, long cursorId, int count); + + /** + * Read next results from an existing cursor using the default batch size. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. This variant uses the default + * batch size that was specified in the original {@code FT.AGGREGATE} command's {@code WITHCURSOR} clause. + *

+ * + *

+ * Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once. + * When the cursor is exhausted (no more results), the returned {@link SearchReply} will have a cursor id of 0. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + Mono> ftCursorread(K index, long cursorId); + + /** + * Delete a cursor and free its associated resources. + * + *

+ * This command is used to explicitly delete a cursor created by {@link #ftAggregate(Object, Object, AggregateArgs)} with + * the {@code WITHCURSOR} option. Deleting a cursor frees up server resources and should be done when you no longer need to + * read more results from the cursor. + *

+ * + *

+ * Important: Cursors have a default timeout and will be automatically deleted by Redis if not accessed + * within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to + * free up resources immediately. + *

+ * + *

+ * Once a cursor is deleted, any subsequent attempts to read from it using {@link #ftCursorread(Object, long)} or + * {@link #ftCursorread(Object, long, int)} will result in an error. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return {@code "OK"} if the cursor was successfully deleted + * @since 6.8 + * @see FT.CURSOR DEL + * @see Cursor + * API + * @see #ftAggregate(Object, Object, AggregateArgs) + * @see #ftCursorread(Object, long) + * @see #ftCursorread(Object, long, int) + */ + @Experimental + Mono ftCursordel(K index, long cursorId); + +} diff --git a/src/main/java/io/lettuce/core/api/reactive/RedisReactiveCommands.java b/src/main/java/io/lettuce/core/api/reactive/RedisReactiveCommands.java index b1f0da828..19e9b6ff7 100644 --- a/src/main/java/io/lettuce/core/api/reactive/RedisReactiveCommands.java +++ b/src/main/java/io/lettuce/core/api/reactive/RedisReactiveCommands.java @@ -31,13 +31,13 @@ * @author Mark Paluch * @since 5.0 */ -public interface RedisReactiveCommands - extends BaseRedisReactiveCommands, RedisAclReactiveCommands, RedisClusterReactiveCommands, - RedisFunctionReactiveCommands, RedisGeoReactiveCommands, RedisHashReactiveCommands, - RedisHLLReactiveCommands, RedisKeyReactiveCommands, RedisListReactiveCommands, - RedisScriptingReactiveCommands, RedisServerReactiveCommands, RedisSetReactiveCommands, - RedisSortedSetReactiveCommands, RedisStreamReactiveCommands, RedisStringReactiveCommands, - RedisTransactionalReactiveCommands, RedisJsonReactiveCommands, RedisVectorSetReactiveCommands { +public interface RedisReactiveCommands extends BaseRedisReactiveCommands, RedisAclReactiveCommands, + RedisClusterReactiveCommands, RedisFunctionReactiveCommands, RedisGeoReactiveCommands, + RedisHashReactiveCommands, RedisHLLReactiveCommands, RedisKeyReactiveCommands, + RedisListReactiveCommands, RedisScriptingReactiveCommands, RedisServerReactiveCommands, + RedisSetReactiveCommands, RedisSortedSetReactiveCommands, RedisStreamReactiveCommands, + RedisStringReactiveCommands, RedisTransactionalReactiveCommands, RedisJsonReactiveCommands, + RedisVectorSetReactiveCommands, RediSearchReactiveCommands { /** * Authenticate to the server. diff --git a/src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java b/src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java new file mode 100644 index 000000000..44df0285a --- /dev/null +++ b/src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java @@ -0,0 +1,442 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.api.sync; + +import java.util.List; + +import io.lettuce.core.annotations.Experimental; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; + +/** + * Synchronous executed commands for RediSearch functionality + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @see RediSearch + * @since 6.8 + * @generated by io.lettuce.apigenerator.CreateSyncApi + */ +public interface RediSearchCommands { + + /** + * Create a new search index with the given name and field definitions using default settings. + * + *

+ * This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis + * data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other + * configuration options. + *

+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, CreateArgs, List) + * @see #ftDropindex(Object) + */ + @Experimental + String ftCreate(K index, List> fieldArgs); + + /** + * Create a new search index with the given name, custom configuration, and field definitions. + * + *

+ * This command creates a new search index with advanced configuration options that control how the index behaves, what data + * it indexes, and how it processes documents. This variant provides full control over index creation parameters. + *

+ * + *

+ * The {@link CreateArgs} parameter allows you to specify: + *

+ *
    + *
  • Data type: HASH (default) or JSON documents
  • + *
  • Key prefixes: Which keys to index based on prefix patterns
  • + *
  • Filters: Conditional indexing based on field values
  • + *
  • Language settings: Default language and language field for stemming
  • + *
  • Performance options: NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization
  • + *
  • Temporary indexes: Auto-expiring indexes for short-term use
  • + *
+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param arguments the index {@link CreateArgs} containing configuration options + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @since 6.8 + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, List) + * @see #ftDropindex(Object) + */ + @Experimental + String ftCreate(K index, CreateArgs arguments, List> fieldArgs); + + /** + * Drop a search index without deleting the associated documents. + * + *

+ * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or + * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without + * losing data. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object, boolean) + * @see #ftCreate(Object, List) + */ + @Experimental + String ftDropindex(K index); + + /** + * Drop a search index with optional document deletion. + * + *

+ * This command removes the search index and optionally deletes all associated documents. When {@code deleteDocuments} is + * {@code true}, this operation becomes destructive and will permanently remove both the index and all indexed documents + * from Redis. + *

+ * + *

+ * Asynchronous Behavior: If an index creation is still running ({@link #ftCreate(Object, List)} is running + * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for + * indexing but not yet processed will remain in the database. + *

+ * + *

+ * Time complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param deleteDocuments if {@code true}, delete the indexed documents as well; if {@code false}, preserve documents + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object) + * @see #ftCreate(Object, List) + */ + @Experimental + String ftDropindex(K index, boolean deleteDocuments); + + /** + * Search the index with a textual query using default search options. + * + *

+ * This command performs a full-text search on the specified index using the provided query string. It returns matching + * documents with their content and metadata. This is the basic search variant that uses default search behavior without + * additional filtering, sorting, or result customization. + *

+ * + *

+ * The query follows RediSearch query syntax, supporting: + *

+ *
    + *
  • Simple text search: {@code "hello world"} - searches for documents containing both terms
  • + *
  • Field-specific search: {@code "@title:redis"} - searches within specific fields
  • + *
  • Boolean operators: {@code "redis AND search"} or {@code "redis | search"}
  • + *
  • Phrase search: {@code "\"exact phrase\""} - searches for exact phrase matches
  • + *
  • Wildcard search: {@code "redi*"} - prefix matching
  • + *
  • Numeric ranges: {@code "@price:[100 200]"} - numeric field filtering
  • + *
  • Geographic search: {@code "@location:[lon lat radius unit]"} - geo-spatial queries
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @return the result of the search command containing matching documents, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object, SearchArgs) + */ + @Experimental + SearchReply ftSearch(K index, V query); + + /** + * Search the index with a textual query using advanced search options and filters. + * + *

+ * This command performs a full-text search on the specified index with advanced configuration options provided through + * {@link SearchArgs}. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting, + * and pagination. + *

+ * + *

+ * The {@link SearchArgs} parameter enables you to specify: + *

+ *
    + *
  • Result options: NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS
  • + *
  • Query behavior: VERBATIM (no stemming), NOSTOPWORDS
  • + *
  • Filtering: Numeric filters, geo filters, field filters
  • + *
  • Result customization: RETURN specific fields, SUMMARIZE, HIGHLIGHT
  • + *
  • Sorting and pagination: SORTBY, LIMIT offset and count
  • + *
  • Performance options: TIMEOUT, SLOP, INORDER
  • + *
  • Language and scoring: LANGUAGE, SCORER, EXPLAINSCORE
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use NOCONTENT when you only need document IDs
  • + *
  • Specify RETURN fields to limit data transfer
  • + *
  • Use SORTABLE fields for efficient sorting
  • + *
  • Apply filters to reduce result set size
  • + *
  • Use LIMIT for pagination to avoid large result sets
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set. Complexity varies based on + * query type, filters, and sorting requirements. + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @param args the search arguments containing advanced options and filters + * @return the result of the search command containing matching documents and metadata, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see Advanced concepts + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object) + */ + @Experimental + SearchReply ftSearch(K index, V query, SearchArgs args); + + /** + * Run a search query on an index and perform basic aggregate transformations using default options. + * + *

+ * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike + * {@link #ftSearch(Object, Object)}, which returns individual documents, FT.AGGREGATE processes the result set through a + * pipeline of transformations to produce analytical insights, summaries, and computed values. + *

+ * + *

+ * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations + * with grouping, sorting, filtering, and custom transformations, use {@link #ftAggregate(Object, Object, AggregateArgs)}. + *

+ * + *

+ * Common use cases for aggregations include: + *

+ *
    + *
  • Analytics: Count documents, calculate averages, find min/max values
  • + *
  • Reporting: Group data by categories, time periods, or geographic regions
  • + *
  • Data transformation: Apply mathematical functions, format dates, extract values
  • + *
  • Performance optimization: Process large datasets server-side instead of client-side
  • + *
+ * + *

+ * Time complexity: O(1) base complexity, but depends on the query and number of results processed + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @return the result of the aggregate command containing processed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + SearchReply ftAggregate(K index, V query); + + /** + * Run a search query on an index and perform advanced aggregate transformations with a processing pipeline. + * + *

+ * This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and + * analyze the results. The {@link AggregateArgs} parameter defines a series of operations that process the data + * server-side, enabling powerful analytics and data transformation capabilities directly within Redis. + *

+ * + *

+ * The aggregation pipeline supports the following operations: + *

+ *
    + *
  • LOAD: Load specific document attributes for processing
  • + *
  • GROUPBY: Group results by one or more properties
  • + *
  • REDUCE: Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)
  • + *
  • SORTBY: Sort results by specified properties
  • + *
  • APPLY: Apply mathematical expressions and transformations
  • + *
  • FILTER: Filter results based on computed values
  • + *
  • LIMIT: Paginate results efficiently
  • + *
  • WITHCURSOR: Enable cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use SORTABLE fields for efficient grouping and sorting operations
  • + *
  • Apply filters early in the pipeline to reduce processing overhead
  • + *
  • Use WITHCURSOR for large result sets to avoid memory issues
  • + *
  • Load only necessary attributes to minimize data transfer
  • + *
  • Consider using LIMIT to restrict result set size
  • + *
+ * + *

+ * Time complexity: Non-deterministic, depends on the query and aggregation operations performed. Generally + * linear to the number of results processed through the pipeline. + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @param args the aggregate arguments defining the processing pipeline and operations + * @return the result of the aggregate command containing processed and transformed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see Cursor + * API + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object) + * @see #ftCursorread(Object, long) + */ + @Experimental + SearchReply ftAggregate(K index, V query, AggregateArgs args); + + /** + * Read next results from an existing cursor. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. Cursors provide an efficient way + * to iterate through large result sets without loading all results into memory at once. + *

+ * + *

+ * The {@code count} parameter overrides the {@code COUNT} value specified in the original {@code FT.AGGREGATE} command, + * allowing you to control the batch size for this specific read operation. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @param count the number of results to read. This parameter overrides the {@code COUNT} specified in {@code FT.AGGREGATE} + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + SearchReply ftCursorread(K index, long cursorId, int count); + + /** + * Read next results from an existing cursor using the default batch size. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. This variant uses the default + * batch size that was specified in the original {@code FT.AGGREGATE} command's {@code WITHCURSOR} clause. + *

+ * + *

+ * Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once. + * When the cursor is exhausted (no more results), the returned {@link SearchReply} will have a cursor id of 0. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + SearchReply ftCursorread(K index, long cursorId); + + /** + * Delete a cursor and free its associated resources. + * + *

+ * This command is used to explicitly delete a cursor created by {@link #ftAggregate(Object, Object, AggregateArgs)} with + * the {@code WITHCURSOR} option. Deleting a cursor frees up server resources and should be done when you no longer need to + * read more results from the cursor. + *

+ * + *

+ * Important: Cursors have a default timeout and will be automatically deleted by Redis if not accessed + * within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to + * free up resources immediately. + *

+ * + *

+ * Once a cursor is deleted, any subsequent attempts to read from it using {@link #ftCursorread(Object, long)} or + * {@link #ftCursorread(Object, long, int)} will result in an error. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return {@code "OK"} if the cursor was successfully deleted + * @since 6.8 + * @see FT.CURSOR DEL + * @see Cursor + * API + * @see #ftAggregate(Object, Object, AggregateArgs) + * @see #ftCursorread(Object, long) + * @see #ftCursorread(Object, long, int) + */ + @Experimental + String ftCursordel(K index, long cursorId); + +} diff --git a/src/main/java/io/lettuce/core/api/sync/RedisCommands.java b/src/main/java/io/lettuce/core/api/sync/RedisCommands.java index e413b6a35..32e178dc8 100644 --- a/src/main/java/io/lettuce/core/api/sync/RedisCommands.java +++ b/src/main/java/io/lettuce/core/api/sync/RedisCommands.java @@ -37,7 +37,7 @@ public interface RedisCommands extends BaseRedisCommands, RedisAclCo RedisFunctionCommands, RedisGeoCommands, RedisHashCommands, RedisHLLCommands, RedisKeyCommands, RedisListCommands, RedisScriptingCommands, RedisServerCommands, RedisSetCommands, RedisSortedSetCommands, RedisStreamCommands, RedisStringCommands, - RedisTransactionalCommands, RedisJsonCommands, RedisVectorSetCommands { + RedisTransactionalCommands, RedisJsonCommands, RedisVectorSetCommands, RediSearchCommands { /** * Authenticate to the server. diff --git a/src/main/java/io/lettuce/core/cluster/api/async/RediSearchAsyncCommands.java b/src/main/java/io/lettuce/core/cluster/api/async/RediSearchAsyncCommands.java new file mode 100644 index 000000000..218fdffab --- /dev/null +++ b/src/main/java/io/lettuce/core/cluster/api/async/RediSearchAsyncCommands.java @@ -0,0 +1,442 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.cluster.api.async; + +import java.util.List; + +import io.lettuce.core.annotations.Experimental; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; + +/** + * Asynchronous executed commands on a node selection for RediSearch functionality + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @see RediSearch + * @since 6.8 + * @generated by io.lettuce.apigenerator.CreateAsyncNodeSelectionClusterApi + */ +public interface RediSearchAsyncCommands { + + /** + * Create a new search index with the given name and field definitions using default settings. + * + *

+ * This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis + * data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other + * configuration options. + *

+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, CreateArgs, List) + * @see #ftDropindex(Object) + */ + @Experimental + AsyncExecutions ftCreate(K index, List> fieldArgs); + + /** + * Create a new search index with the given name, custom configuration, and field definitions. + * + *

+ * This command creates a new search index with advanced configuration options that control how the index behaves, what data + * it indexes, and how it processes documents. This variant provides full control over index creation parameters. + *

+ * + *

+ * The {@link CreateArgs} parameter allows you to specify: + *

+ *
    + *
  • Data type: HASH (default) or JSON documents
  • + *
  • Key prefixes: Which keys to index based on prefix patterns
  • + *
  • Filters: Conditional indexing based on field values
  • + *
  • Language settings: Default language and language field for stemming
  • + *
  • Performance options: NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization
  • + *
  • Temporary indexes: Auto-expiring indexes for short-term use
  • + *
+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param arguments the index {@link CreateArgs} containing configuration options + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @since 6.8 + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, List) + * @see #ftDropindex(Object) + */ + @Experimental + AsyncExecutions ftCreate(K index, CreateArgs arguments, List> fieldArgs); + + /** + * Drop a search index without deleting the associated documents. + * + *

+ * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or + * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without + * losing data. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object, boolean) + * @see #ftCreate(Object, List) + */ + @Experimental + AsyncExecutions ftDropindex(K index); + + /** + * Drop a search index with optional document deletion. + * + *

+ * This command removes the search index and optionally deletes all associated documents. When {@code deleteDocuments} is + * {@code true}, this operation becomes destructive and will permanently remove both the index and all indexed documents + * from Redis. + *

+ * + *

+ * Asynchronous Behavior: If an index creation is still running ({@link #ftCreate(Object, List)} is running + * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for + * indexing but not yet processed will remain in the database. + *

+ * + *

+ * Time complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param deleteDocuments if {@code true}, delete the indexed documents as well; if {@code false}, preserve documents + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object) + * @see #ftCreate(Object, List) + */ + @Experimental + AsyncExecutions ftDropindex(K index, boolean deleteDocuments); + + /** + * Search the index with a textual query using default search options. + * + *

+ * This command performs a full-text search on the specified index using the provided query string. It returns matching + * documents with their content and metadata. This is the basic search variant that uses default search behavior without + * additional filtering, sorting, or result customization. + *

+ * + *

+ * The query follows RediSearch query syntax, supporting: + *

+ *
    + *
  • Simple text search: {@code "hello world"} - searches for documents containing both terms
  • + *
  • Field-specific search: {@code "@title:redis"} - searches within specific fields
  • + *
  • Boolean operators: {@code "redis AND search"} or {@code "redis | search"}
  • + *
  • Phrase search: {@code "\"exact phrase\""} - searches for exact phrase matches
  • + *
  • Wildcard search: {@code "redi*"} - prefix matching
  • + *
  • Numeric ranges: {@code "@price:[100 200]"} - numeric field filtering
  • + *
  • Geographic search: {@code "@location:[lon lat radius unit]"} - geo-spatial queries
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @return the result of the search command containing matching documents, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object, SearchArgs) + */ + @Experimental + AsyncExecutions> ftSearch(K index, V query); + + /** + * Search the index with a textual query using advanced search options and filters. + * + *

+ * This command performs a full-text search on the specified index with advanced configuration options provided through + * {@link SearchArgs}. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting, + * and pagination. + *

+ * + *

+ * The {@link SearchArgs} parameter enables you to specify: + *

+ *
    + *
  • Result options: NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS
  • + *
  • Query behavior: VERBATIM (no stemming), NOSTOPWORDS
  • + *
  • Filtering: Numeric filters, geo filters, field filters
  • + *
  • Result customization: RETURN specific fields, SUMMARIZE, HIGHLIGHT
  • + *
  • Sorting and pagination: SORTBY, LIMIT offset and count
  • + *
  • Performance options: TIMEOUT, SLOP, INORDER
  • + *
  • Language and scoring: LANGUAGE, SCORER, EXPLAINSCORE
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use NOCONTENT when you only need document IDs
  • + *
  • Specify RETURN fields to limit data transfer
  • + *
  • Use SORTABLE fields for efficient sorting
  • + *
  • Apply filters to reduce result set size
  • + *
  • Use LIMIT for pagination to avoid large result sets
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set. Complexity varies based on + * query type, filters, and sorting requirements. + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @param args the search arguments containing advanced options and filters + * @return the result of the search command containing matching documents and metadata, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see Advanced concepts + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object) + */ + @Experimental + AsyncExecutions> ftSearch(K index, V query, SearchArgs args); + + /** + * Run a search query on an index and perform basic aggregate transformations using default options. + * + *

+ * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike + * {@link #ftSearch(Object, Object)}, which returns individual documents, FT.AGGREGATE processes the result set through a + * pipeline of transformations to produce analytical insights, summaries, and computed values. + *

+ * + *

+ * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations + * with grouping, sorting, filtering, and custom transformations, use {@link #ftAggregate(Object, Object, AggregateArgs)}. + *

+ * + *

+ * Common use cases for aggregations include: + *

+ *
    + *
  • Analytics: Count documents, calculate averages, find min/max values
  • + *
  • Reporting: Group data by categories, time periods, or geographic regions
  • + *
  • Data transformation: Apply mathematical functions, format dates, extract values
  • + *
  • Performance optimization: Process large datasets server-side instead of client-side
  • + *
+ * + *

+ * Time complexity: O(1) base complexity, but depends on the query and number of results processed + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @return the result of the aggregate command containing processed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + AsyncExecutions> ftAggregate(K index, V query); + + /** + * Run a search query on an index and perform advanced aggregate transformations with a processing pipeline. + * + *

+ * This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and + * analyze the results. The {@link AggregateArgs} parameter defines a series of operations that process the data + * server-side, enabling powerful analytics and data transformation capabilities directly within Redis. + *

+ * + *

+ * The aggregation pipeline supports the following operations: + *

+ *
    + *
  • LOAD: Load specific document attributes for processing
  • + *
  • GROUPBY: Group results by one or more properties
  • + *
  • REDUCE: Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)
  • + *
  • SORTBY: Sort results by specified properties
  • + *
  • APPLY: Apply mathematical expressions and transformations
  • + *
  • FILTER: Filter results based on computed values
  • + *
  • LIMIT: Paginate results efficiently
  • + *
  • WITHCURSOR: Enable cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use SORTABLE fields for efficient grouping and sorting operations
  • + *
  • Apply filters early in the pipeline to reduce processing overhead
  • + *
  • Use WITHCURSOR for large result sets to avoid memory issues
  • + *
  • Load only necessary attributes to minimize data transfer
  • + *
  • Consider using LIMIT to restrict result set size
  • + *
+ * + *

+ * Time complexity: Non-deterministic, depends on the query and aggregation operations performed. Generally + * linear to the number of results processed through the pipeline. + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @param args the aggregate arguments defining the processing pipeline and operations + * @return the result of the aggregate command containing processed and transformed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see Cursor + * API + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object) + * @see #ftCursorread(Object, long) + */ + @Experimental + AsyncExecutions> ftAggregate(K index, V query, AggregateArgs args); + + /** + * Read next results from an existing cursor. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. Cursors provide an efficient way + * to iterate through large result sets without loading all results into memory at once. + *

+ * + *

+ * The {@code count} parameter overrides the {@code COUNT} value specified in the original {@code FT.AGGREGATE} command, + * allowing you to control the batch size for this specific read operation. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @param count the number of results to read. This parameter overrides the {@code COUNT} specified in {@code FT.AGGREGATE} + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + AsyncExecutions> ftCursorread(K index, long cursorId, int count); + + /** + * Read next results from an existing cursor using the default batch size. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. This variant uses the default + * batch size that was specified in the original {@code FT.AGGREGATE} command's {@code WITHCURSOR} clause. + *

+ * + *

+ * Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once. + * When the cursor is exhausted (no more results), the returned {@link SearchReply} will have a cursor id of 0. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + AsyncExecutions> ftCursorread(K index, long cursorId); + + /** + * Delete a cursor and free its associated resources. + * + *

+ * This command is used to explicitly delete a cursor created by {@link #ftAggregate(Object, Object, AggregateArgs)} with + * the {@code WITHCURSOR} option. Deleting a cursor frees up server resources and should be done when you no longer need to + * read more results from the cursor. + *

+ * + *

+ * Important: Cursors have a default timeout and will be automatically deleted by Redis if not accessed + * within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to + * free up resources immediately. + *

+ * + *

+ * Once a cursor is deleted, any subsequent attempts to read from it using {@link #ftCursorread(Object, long)} or + * {@link #ftCursorread(Object, long, int)} will result in an error. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return {@code "OK"} if the cursor was successfully deleted + * @since 6.8 + * @see FT.CURSOR DEL + * @see Cursor + * API + * @see #ftAggregate(Object, Object, AggregateArgs) + * @see #ftCursorread(Object, long) + * @see #ftCursorread(Object, long, int) + */ + @Experimental + AsyncExecutions ftCursordel(K index, long cursorId); + +} diff --git a/src/main/java/io/lettuce/core/cluster/api/sync/RediSearchCommands.java b/src/main/java/io/lettuce/core/cluster/api/sync/RediSearchCommands.java new file mode 100644 index 000000000..1acb940d4 --- /dev/null +++ b/src/main/java/io/lettuce/core/cluster/api/sync/RediSearchCommands.java @@ -0,0 +1,442 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.cluster.api.sync; + +import java.util.List; + +import io.lettuce.core.annotations.Experimental; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; + +/** + * Synchronous executed commands on a node selection for RediSearch functionality + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @see RediSearch + * @since 6.8 + * @generated by io.lettuce.apigenerator.CreateSyncNodeSelectionClusterApi + */ +public interface RediSearchCommands { + + /** + * Create a new search index with the given name and field definitions using default settings. + * + *

+ * This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis + * data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other + * configuration options. + *

+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, CreateArgs, List) + * @see #ftDropindex(Object) + */ + @Experimental + Executions ftCreate(K index, List> fieldArgs); + + /** + * Create a new search index with the given name, custom configuration, and field definitions. + * + *

+ * This command creates a new search index with advanced configuration options that control how the index behaves, what data + * it indexes, and how it processes documents. This variant provides full control over index creation parameters. + *

+ * + *

+ * The {@link CreateArgs} parameter allows you to specify: + *

+ *
    + *
  • Data type: HASH (default) or JSON documents
  • + *
  • Key prefixes: Which keys to index based on prefix patterns
  • + *
  • Filters: Conditional indexing based on field values
  • + *
  • Language settings: Default language and language field for stemming
  • + *
  • Performance options: NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization
  • + *
  • Temporary indexes: Auto-expiring indexes for short-term use
  • + *
+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param arguments the index {@link CreateArgs} containing configuration options + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @since 6.8 + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, List) + * @see #ftDropindex(Object) + */ + @Experimental + Executions ftCreate(K index, CreateArgs arguments, List> fieldArgs); + + /** + * Drop a search index without deleting the associated documents. + * + *

+ * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or + * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without + * losing data. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object, boolean) + * @see #ftCreate(Object, List) + */ + @Experimental + Executions ftDropindex(K index); + + /** + * Drop a search index with optional document deletion. + * + *

+ * This command removes the search index and optionally deletes all associated documents. When {@code deleteDocuments} is + * {@code true}, this operation becomes destructive and will permanently remove both the index and all indexed documents + * from Redis. + *

+ * + *

+ * Asynchronous Behavior: If an index creation is still running ({@link #ftCreate(Object, List)} is running + * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for + * indexing but not yet processed will remain in the database. + *

+ * + *

+ * Time complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param deleteDocuments if {@code true}, delete the indexed documents as well; if {@code false}, preserve documents + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object) + * @see #ftCreate(Object, List) + */ + @Experimental + Executions ftDropindex(K index, boolean deleteDocuments); + + /** + * Search the index with a textual query using default search options. + * + *

+ * This command performs a full-text search on the specified index using the provided query string. It returns matching + * documents with their content and metadata. This is the basic search variant that uses default search behavior without + * additional filtering, sorting, or result customization. + *

+ * + *

+ * The query follows RediSearch query syntax, supporting: + *

+ *
    + *
  • Simple text search: {@code "hello world"} - searches for documents containing both terms
  • + *
  • Field-specific search: {@code "@title:redis"} - searches within specific fields
  • + *
  • Boolean operators: {@code "redis AND search"} or {@code "redis | search"}
  • + *
  • Phrase search: {@code "\"exact phrase\""} - searches for exact phrase matches
  • + *
  • Wildcard search: {@code "redi*"} - prefix matching
  • + *
  • Numeric ranges: {@code "@price:[100 200]"} - numeric field filtering
  • + *
  • Geographic search: {@code "@location:[lon lat radius unit]"} - geo-spatial queries
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @return the result of the search command containing matching documents, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object, SearchArgs) + */ + @Experimental + Executions> ftSearch(K index, V query); + + /** + * Search the index with a textual query using advanced search options and filters. + * + *

+ * This command performs a full-text search on the specified index with advanced configuration options provided through + * {@link SearchArgs}. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting, + * and pagination. + *

+ * + *

+ * The {@link SearchArgs} parameter enables you to specify: + *

+ *
    + *
  • Result options: NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS
  • + *
  • Query behavior: VERBATIM (no stemming), NOSTOPWORDS
  • + *
  • Filtering: Numeric filters, geo filters, field filters
  • + *
  • Result customization: RETURN specific fields, SUMMARIZE, HIGHLIGHT
  • + *
  • Sorting and pagination: SORTBY, LIMIT offset and count
  • + *
  • Performance options: TIMEOUT, SLOP, INORDER
  • + *
  • Language and scoring: LANGUAGE, SCORER, EXPLAINSCORE
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use NOCONTENT when you only need document IDs
  • + *
  • Specify RETURN fields to limit data transfer
  • + *
  • Use SORTABLE fields for efficient sorting
  • + *
  • Apply filters to reduce result set size
  • + *
  • Use LIMIT for pagination to avoid large result sets
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set. Complexity varies based on + * query type, filters, and sorting requirements. + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @param args the search arguments containing advanced options and filters + * @return the result of the search command containing matching documents and metadata, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see Advanced concepts + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object) + */ + @Experimental + Executions> ftSearch(K index, V query, SearchArgs args); + + /** + * Run a search query on an index and perform basic aggregate transformations using default options. + * + *

+ * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike + * {@link #ftSearch(Object, Object)}, which returns individual documents, FT.AGGREGATE processes the result set through a + * pipeline of transformations to produce analytical insights, summaries, and computed values. + *

+ * + *

+ * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations + * with grouping, sorting, filtering, and custom transformations, use {@link #ftAggregate(Object, Object, AggregateArgs)}. + *

+ * + *

+ * Common use cases for aggregations include: + *

+ *
    + *
  • Analytics: Count documents, calculate averages, find min/max values
  • + *
  • Reporting: Group data by categories, time periods, or geographic regions
  • + *
  • Data transformation: Apply mathematical functions, format dates, extract values
  • + *
  • Performance optimization: Process large datasets server-side instead of client-side
  • + *
+ * + *

+ * Time complexity: O(1) base complexity, but depends on the query and number of results processed + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @return the result of the aggregate command containing processed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + Executions> ftAggregate(K index, V query); + + /** + * Run a search query on an index and perform advanced aggregate transformations with a processing pipeline. + * + *

+ * This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and + * analyze the results. The {@link AggregateArgs} parameter defines a series of operations that process the data + * server-side, enabling powerful analytics and data transformation capabilities directly within Redis. + *

+ * + *

+ * The aggregation pipeline supports the following operations: + *

+ *
    + *
  • LOAD: Load specific document attributes for processing
  • + *
  • GROUPBY: Group results by one or more properties
  • + *
  • REDUCE: Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)
  • + *
  • SORTBY: Sort results by specified properties
  • + *
  • APPLY: Apply mathematical expressions and transformations
  • + *
  • FILTER: Filter results based on computed values
  • + *
  • LIMIT: Paginate results efficiently
  • + *
  • WITHCURSOR: Enable cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use SORTABLE fields for efficient grouping and sorting operations
  • + *
  • Apply filters early in the pipeline to reduce processing overhead
  • + *
  • Use WITHCURSOR for large result sets to avoid memory issues
  • + *
  • Load only necessary attributes to minimize data transfer
  • + *
  • Consider using LIMIT to restrict result set size
  • + *
+ * + *

+ * Time complexity: Non-deterministic, depends on the query and aggregation operations performed. Generally + * linear to the number of results processed through the pipeline. + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @param args the aggregate arguments defining the processing pipeline and operations + * @return the result of the aggregate command containing processed and transformed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see Cursor + * API + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object) + * @see #ftCursorread(Object, long) + */ + @Experimental + Executions> ftAggregate(K index, V query, AggregateArgs args); + + /** + * Read next results from an existing cursor. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. Cursors provide an efficient way + * to iterate through large result sets without loading all results into memory at once. + *

+ * + *

+ * The {@code count} parameter overrides the {@code COUNT} value specified in the original {@code FT.AGGREGATE} command, + * allowing you to control the batch size for this specific read operation. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @param count the number of results to read. This parameter overrides the {@code COUNT} specified in {@code FT.AGGREGATE} + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + Executions> ftCursorread(K index, long cursorId, int count); + + /** + * Read next results from an existing cursor using the default batch size. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. This variant uses the default + * batch size that was specified in the original {@code FT.AGGREGATE} command's {@code WITHCURSOR} clause. + *

+ * + *

+ * Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once. + * When the cursor is exhausted (no more results), the returned {@link SearchReply} will have a cursor id of 0. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + Executions> ftCursorread(K index, long cursorId); + + /** + * Delete a cursor and free its associated resources. + * + *

+ * This command is used to explicitly delete a cursor created by {@link #ftAggregate(Object, Object, AggregateArgs)} with + * the {@code WITHCURSOR} option. Deleting a cursor frees up server resources and should be done when you no longer need to + * read more results from the cursor. + *

+ * + *

+ * Important: Cursors have a default timeout and will be automatically deleted by Redis if not accessed + * within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to + * free up resources immediately. + *

+ * + *

+ * Once a cursor is deleted, any subsequent attempts to read from it using {@link #ftCursorread(Object, long)} or + * {@link #ftCursorread(Object, long, int)} will result in an error. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return {@code "OK"} if the cursor was successfully deleted + * @since 6.8 + * @see FT.CURSOR DEL + * @see Cursor + * API + * @see #ftAggregate(Object, Object, AggregateArgs) + * @see #ftCursorread(Object, long) + * @see #ftCursorread(Object, long, int) + */ + @Experimental + Executions ftCursordel(K index, long cursorId); + +} diff --git a/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java b/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java index d29ea3118..c621df002 100644 --- a/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java @@ -34,6 +34,7 @@ * @param Value type. * @author Mark Paluch * @author dengliming + * @author Tihomir Mateev * @since 4.0 */ public interface RedisClusterCommands diff --git a/src/main/java/io/lettuce/core/output/ComplexOutput.java b/src/main/java/io/lettuce/core/output/ComplexOutput.java index 05bc6709a..5bc96023f 100644 --- a/src/main/java/io/lettuce/core/output/ComplexOutput.java +++ b/src/main/java/io/lettuce/core/output/ComplexOutput.java @@ -21,7 +21,7 @@ * example a map containing other maps, arrays or sets as values for one or more of its keys. *

* The implementation of the {@link ComplexDataParser} is responsible for mapping the data from the result to meaningful - * properties that the user of the LEttuce driver could then use in a statically typed manner. + * properties that the user of the Lettuce driver could then use in a statically typed manner. * * @see ComplexDataParser * @author Tihomir Mateev @@ -33,7 +33,7 @@ public class ComplexOutput extends CommandOutput { private final ComplexDataParser parser; - private ComplexData data; + protected ComplexData data; /** * Constructs a new instance of the {@link ComplexOutput} diff --git a/src/main/java/io/lettuce/core/output/EncodedComplexOutput.java b/src/main/java/io/lettuce/core/output/EncodedComplexOutput.java new file mode 100644 index 000000000..38d421901 --- /dev/null +++ b/src/main/java/io/lettuce/core/output/EncodedComplexOutput.java @@ -0,0 +1,38 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.output; + +import io.lettuce.core.codec.RedisCodec; + +import java.nio.ByteBuffer; + +public class EncodedComplexOutput extends ComplexOutput { + + /** + * Constructs a new instance of the {@link ComplexOutput} + * + * @param codec the {@link RedisCodec} to be applied + * @param parser + */ + public EncodedComplexOutput(RedisCodec codec, ComplexDataParser parser) { + super(codec, parser); + } + + @Override + public void set(ByteBuffer bytes) { + data.storeObject(bytes.asReadOnlyBuffer()); + + } + + @Override + public void setSingle(ByteBuffer bytes) { + data.storeObject(bytes.asReadOnlyBuffer()); + + } + +} diff --git a/src/main/java/io/lettuce/core/protocol/CommandKeyword.java b/src/main/java/io/lettuce/core/protocol/CommandKeyword.java index 5ccfc6035..eceb7447f 100644 --- a/src/main/java/io/lettuce/core/protocol/CommandKeyword.java +++ b/src/main/java/io/lettuce/core/protocol/CommandKeyword.java @@ -51,7 +51,13 @@ public enum CommandKeyword implements ProtocolKeyword { WITHMATCHLEN, WITHSCORE, WITHSCORES, WITHVALUES, XOR, XX, FXX, YES, INDENT, NEWLINE, SPACE, GT, LT, - CAS, EF, ELE, SETATTR, M, NOQUANT, BIN, Q8, FILTER, FILTER_EF("FILTER-EF"), TRUTH, NOTHREAD, REDUCE, VALUES, RAW; + CAS, EF, ELE, SETATTR, M, NOQUANT, BIN, Q8, FILTER, FILTER_EF("FILTER-EF"), TRUTH, NOTHREAD, REDUCE, VALUES, RAW, + + MAXTEXTFIELDS, PREFIX, LANGUAGE, LANGUAGE_FIELD, SCORE, SCORE_FIELD, PAYLOAD_FIELD, TEMPORARY, NOOFFSETS, NOHL, NOFIELDS, NOFREQS, SKIPINITIALSCAN, STOPWORDS, AS, SORTABLE, SCHEMA, UNF, NOINDEX, + + NOSTEM, PHONETIC, WEIGHT, SEPARATOR, CASESENSITIVE, WITHSUFFIXTRIE, INDEXEMPTY, INDEXMISSING, DD, SORTBY, WITHCOUNT, SUMMARIZE, FRAGS, HIGHLIGHT, TAGS, DIALECT, PARAMS, TIMEOUT, SLOP, EXPLAINSCORE, PAYLOAD, + + SCORER, EXPANDER, INORDER, RETURN, INFIELDS, INKEYS, WITHSORTKEYS, WITHPAYLOADS, NOSTOPWORDS, VERBATIM, NOCONTENT, FLAT, SPHERICAL, HNSW, DIM, DISTANCE_METRIC, FLOAT32, FLOAT64, L2, COSINE, IP, WITHCURSOR, MAXIDLE, ADDSCORES, GROUPBY, APPLY, READ, DEL; public final byte[] bytes; diff --git a/src/main/java/io/lettuce/core/protocol/CommandType.java b/src/main/java/io/lettuce/core/protocol/CommandType.java index aed6e358b..b5666c186 100644 --- a/src/main/java/io/lettuce/core/protocol/CommandType.java +++ b/src/main/java/io/lettuce/core/protocol/CommandType.java @@ -116,6 +116,10 @@ public enum CommandType implements ProtocolKeyword { VADD, VCARD, VDIM, VEMB, VEMBRAW, VGETATTR, VINFO, VLINKS, VLINKSWITHSCORES, VRANDMEMBER, VREM, VSETATTR, VSIM, VSIMWITHSCORES, + // RediSearch + FT_AGGREGATE("FT.AGGREGATE"), FT_CREATE("FT.CREATE"), FT_CURSOR("FT.CURSOR"), FT_DROPINDEX("FT.DROPINDEX"), FT_SEARCH( + "FT.SEARCH"), + // Others TIME, WAIT, diff --git a/src/main/java/io/lettuce/core/search/SearchReply.java b/src/main/java/io/lettuce/core/search/SearchReply.java new file mode 100644 index 000000000..c48cae859 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/SearchReply.java @@ -0,0 +1,274 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Represents the results of a Redis FT.SEARCH command. + *

+ * This class encapsulates the search results including the total count of matching documents and a list of individual search + * result documents. Each document contains the document ID and optionally the document fields, score, payload, and sort keys + * depending on the search arguments used. + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @since 6.8 + * @see FT.SEARCH + */ +public class SearchReply { + + private long count; + + private final List> results; + + private Long cursorId; + + /** + * Creates a new empty SearchReply instance. + */ + public SearchReply() { + this.count = 0; + this.results = new ArrayList<>(); + this.cursorId = null; + } + + /** + * Creates a new SearchReply instance with the specified count and results. + * + * @param count the total number of matching documents + * @param results the list of search result documents + */ + SearchReply(long count, List> results) { + this.count = count; + this.results = new ArrayList<>(results); + this.cursorId = null; + } + + /** + * Gets the total number of matching documents. + *

+ * This represents the total count of documents that match the search query, which may be larger than the number of results + * returned if LIMIT was used. + * + * @return the total number of matching documents + */ + public long getCount() { + return count; + } + + /** + * Sets the total number of matching documents. + * + * @param count the total number of matching documents + */ + void setCount(long count) { + this.count = count; + } + + /** + * Gets the list of search result documents. + *

+ * Each result contains the document ID and optionally the document fields, score, payload, and sort keys depending on the + * search arguments used. + * + * @return an unmodifiable list of search result documents + */ + public List> getResults() { + return Collections.unmodifiableList(results); + } + + /** + * Adds a search result document to the results list. + * + * @param result the search result document to add + */ + public void addResult(SearchResult result) { + this.results.add(result); + } + + /** + * Gets the number of search result documents returned. + *

+ * This may be different from {@link #getCount()} if LIMIT was used in the search. + * + * @return the number of search result documents returned + */ + public int size() { + return results.size(); + } + + /** + * Checks if the search results are empty. + * + * @return true if no search result documents were returned, false otherwise + */ + public boolean isEmpty() { + return results.isEmpty(); + } + + /** + * Gets the cursor ID for paginated results. + *

+ * This is only available when using cursor-based pagination with FT.AGGREGATE WITHCURSOR. A cursor ID of 0 indicates that + * there are no more results to fetch. + * + * @return the cursor ID, or null if cursor-based pagination is not being used + */ + public Long getCursorId() { + return cursorId; + } + + /** + * Sets the cursor ID for paginated results. + * + * @param cursorId the cursor ID + */ + void setCursorId(Long cursorId) { + this.cursorId = cursorId; + } + + /** + * Represents a single search result document. + * + * @param Key type. + * @param Value type. + */ + public static class SearchResult { + + private final K id; + + private Double score; + + private V payload; + + private V sortKey; + + private final Map fields = new HashMap<>(); + + /** + * Creates a new SearchResult with the specified document ID. + * + * @param id the document ID + */ + public SearchResult(K id) { + this.id = id; + } + + public SearchResult() { + this.id = null; + } + + /** + * Gets the document ID. + * + * @return the document ID + */ + public K getId() { + return id; + } + + /** + * Gets the document score. + *

+ * This is only available if WITHSCORES was used in the search. + * + * @return the document score, or null if not available + */ + public Double getScore() { + return score; + } + + /** + * Sets the document score. + * + * @param score the document score + */ + void setScore(Double score) { + this.score = score; + } + + /** + * Gets the document payload. + *

+ * This is only available if WITHPAYLOADS was used in the search. + * + * @return the document payload, or null if not available + */ + public V getPayload() { + return payload; + } + + /** + * Sets the document payload. + * + * @param payload the document payload + */ + void setPayload(V payload) { + this.payload = payload; + } + + /** + * Gets the sort key. + *

+ * This is only available if WITHSORTKEYS was used in the search. + * + * @return the sort key, or null if not available + */ + public V getSortKey() { + return sortKey; + } + + /** + * Sets the sort key. + * + * @param sortKey the sort key + */ + void setSortKey(V sortKey) { + this.sortKey = sortKey; + } + + /** + * Gets the document fields. + *

+ * This contains the field names and values of the document. If NOCONTENT was used in the search, this will be null or + * empty. + * + * @return the document fields, or null if not available + */ + public Map getFields() { + return fields; + } + + /** + * Adds all the provided fields + * + * @param fields the document fields + */ + public void addFields(Map fields) { + this.fields.putAll(fields); + } + + /** + * Adds a single document field + * + * @param key the field name + * @param value the field value + */ + public void addFields(K key, V value) { + this.fields.put(key, value); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/SearchReplyParser.java b/src/main/java/io/lettuce/core/search/SearchReplyParser.java new file mode 100644 index 000000000..69b464851 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/SearchReplyParser.java @@ -0,0 +1,224 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search; + +import io.lettuce.core.codec.RedisCodec; +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.output.ComplexData; +import io.lettuce.core.output.ComplexDataParser; +import io.lettuce.core.search.arguments.SearchArgs; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class SearchReplyParser implements ComplexDataParser> { + + private static final InternalLogger LOG = InternalLoggerFactory.getInstance(SearchReplyParser.class); + + private final RedisCodec codec; + + private final boolean withScores; + + private final boolean withContent; + + public SearchReplyParser(RedisCodec codec, SearchArgs args) { + this.codec = codec; + this.withScores = args != null && args.isWithScores(); + // this.withPayloads = args != null && args.isWithPayloads(); + // this.withSortKeys = args != null && args.isWithSortKeys(); + this.withContent = args == null || !args.isNoContent(); + } + + @Override + public SearchReply parse(ComplexData data) { + try { + data.getDynamicList(); + return new Resp2SearchResultsParser().parse(data); + } catch (UnsupportedOperationException e) { + return new Resp3SearchResultsParser().parse(data); + } + } + + class Resp2SearchResultsParser implements ComplexDataParser> { + + @Override + public SearchReply parse(ComplexData data) { + final SearchReply searchReply = new SearchReply<>(); + + final List resultsList = data.getDynamicList(); + + if (resultsList == null || resultsList.isEmpty()) { + return searchReply; + } + + // Check if this is a cursor response (has 2 elements: results array and cursor id) + if (resultsList.size() == 2 && resultsList.get(1) instanceof Long) { + // This is a cursor response: [results_array, cursor_id] + List actualResults = ((ComplexData) resultsList.get(0)).getDynamicList(); + Long cursorId = (Long) resultsList.get(1); + + searchReply.setCursorId(cursorId); + + if (actualResults == null || actualResults.isEmpty()) { + return searchReply; + } + + searchReply.setCount((Long) actualResults.get(0)); + + if (actualResults.size() == 1) { + return searchReply; + } + + // Parse the actual results + parseResults(searchReply, actualResults, 1); + } else { + // Regular search response + searchReply.setCount((Long) resultsList.get(0)); + + if (resultsList.size() == 1) { + return searchReply; + } + + // Parse the results + parseResults(searchReply, resultsList, 1); + } + + return searchReply; + } + + private void parseResults(SearchReply searchReply, List resultsList, int startIndex) { + for (int i = startIndex; i < resultsList.size(); i++) { + + final K id = codec.decodeKey((ByteBuffer) resultsList.get(i)); + final SearchReply.SearchResult searchResult = new SearchReply.SearchResult<>(id); + + if (withScores) { + searchResult.setScore(Double.parseDouble(StringCodec.UTF8.decodeKey((ByteBuffer) resultsList.get(i + 1)))); + i++; + } + + if (withContent) { + ComplexData resultData = (ComplexData) resultsList.get(i + 1); + List resultEntries = resultData.getDynamicList(); + + Map resultEntriesProcessed = IntStream.range(0, resultEntries.size() / 2).boxed() + .collect(Collectors.toMap(idx -> codec.decodeKey((ByteBuffer) resultEntries.get(idx * 2)), + idx -> codec.decodeValue((ByteBuffer) resultEntries.get(idx * 2 + 1)))); + + searchResult.addFields(resultEntriesProcessed); + i++; + } + + searchReply.addResult(searchResult); + } + } + + } + + class Resp3SearchResultsParser implements ComplexDataParser> { + + private final ByteBuffer ATTRIBUTES_KEY = StringCodec.UTF8.encodeKey("attributes"); + + private final ByteBuffer FORMAT_KEY = StringCodec.UTF8.encodeKey("format"); + + private final ByteBuffer RESULTS_KEY = StringCodec.UTF8.encodeKey("results"); + + private final ByteBuffer TOTAL_RESULTS_KEY = StringCodec.UTF8.encodeKey("total_results"); + + private final ByteBuffer WARNING_KEY = StringCodec.UTF8.encodeKey("warning"); + + private final ByteBuffer SCORE_KEY = StringCodec.UTF8.encodeKey("score"); + + private final ByteBuffer ID_KEY = StringCodec.UTF8.encodeKey("id"); + + private final ByteBuffer EXTRA_ATTRIBUTES_KEY = StringCodec.UTF8.encodeKey("extra_attributes"); + + private final ByteBuffer VALUES_KEY = StringCodec.UTF8.encodeKey("values"); + + private final ByteBuffer CURSOR_KEY = StringCodec.UTF8.encodeKey("cursor"); + + @Override + public SearchReply parse(ComplexData data) { + final SearchReply searchReply = new SearchReply<>(); + + final Map resultsMap = data.getDynamicMap(); + + if (resultsMap == null || resultsMap.isEmpty()) { + return searchReply; + } + + // FIXME Parse attributes? ATTRIBUTES_KEY + + // FIXME Parse format? FORMAT_KEY + + if (resultsMap.containsKey(RESULTS_KEY)) { + ComplexData results = (ComplexData) resultsMap.get(RESULTS_KEY); + + List a = resultsMap.keySet().stream().map(o -> (ByteBuffer) o).map(StringCodec.UTF8::decodeKey) + .collect(Collectors.toList()); + + results.getDynamicList().forEach(result -> { + ComplexData resultData = (ComplexData) result; + Map resultEntry = resultData.getDynamicMap(); + + SearchReply.SearchResult searchResult; + if (resultEntry.containsKey(ID_KEY)) { + final K id = codec.decodeKey((ByteBuffer) resultEntry.get(ID_KEY)); + searchResult = new SearchReply.SearchResult<>(id); + } else { + searchResult = new SearchReply.SearchResult<>(); + } + + if (resultEntry.containsKey(SCORE_KEY)) { + if (resultEntry.get(SCORE_KEY) instanceof Double) { + searchResult.setScore((Double) resultEntry.get(SCORE_KEY)); + } else { + ComplexData scores = (ComplexData) resultEntry.get(SCORE_KEY); + List scoresList = scores.getDynamicList(); + searchResult.setScore((Double) scoresList.get(0)); + } + } + + if (resultEntry.containsKey(EXTRA_ATTRIBUTES_KEY)) { + ComplexData extraAttributes = (ComplexData) resultEntry.get(EXTRA_ATTRIBUTES_KEY); + extraAttributes.getDynamicMap().forEach((key, value) -> { + K decodedKey = codec.decodeKey((ByteBuffer) key); + V decodedValue = codec.decodeValue((ByteBuffer) value); + searchResult.addFields(decodedKey, decodedValue); + }); + } + searchReply.addResult(searchResult); + }); + } + + if (resultsMap.containsKey(TOTAL_RESULTS_KEY)) { + searchReply.setCount((Long) resultsMap.get(TOTAL_RESULTS_KEY)); + } + + if (resultsMap.containsKey(CURSOR_KEY)) { + searchReply.setCursorId((Long) resultsMap.get(CURSOR_KEY)); + } + + if (resultsMap.containsKey(WARNING_KEY)) { + ComplexData warning = (ComplexData) resultsMap.get(WARNING_KEY); + warning.getDynamicList().forEach(warningEntry -> { + LOG.warn("Warning while parsing search results: {}", warningEntry); + }); + } + + return searchReply; + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java b/src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java new file mode 100644 index 000000000..6ae995d49 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java @@ -0,0 +1,1087 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; +import io.lettuce.core.protocol.CommandKeyword; + +import java.time.Duration; +import java.util.*; +import java.util.Arrays; + +/** + * Argument list builder for {@code FT.AGGREGATE} command. + * + *

+ * FT.AGGREGATE runs a search query on an index and performs aggregate transformations on the results. It provides a powerful + * aggregation pipeline that can group, sort, apply mathematical expressions, filter, and limit results in a single command. + *

+ * + *

Basic Usage:

+ * + *
+ * 
+ * {
+ *     @code
+ *     // Simple aggregation with grouping and counting
+ *     AggregateArgs args = AggregateArgs. builder().groupBy("category")
+ *             .reduce(Reducer.count().as("count")).sortBy("count", SortDirection.DESC).build();
+ *     SearchReply result = redis.ftAggregate("myindex", "*", args);
+ * }
+ * 
+ * + * + *

Advanced Pipeline Example:

+ * + *
+ * 
+ * {
+ *     @code
+ *     // Complex aggregation pipeline
+ *     AggregateArgs args = AggregateArgs. builder().load("price", "quantity", "category")
+ *             .apply("@price * @quantity", "total_value").filter("@total_value > 100").groupBy("category")
+ *             .reduce(Reducer.sum("@total_value").as("category_total")).reduce(Reducer.avg("@price").as("avg_price"))
+ *             .sortBy("category_total", SortDirection.DESC).limit(0, 10).dialect(QueryDialects.DIALECT2).build();
+ * }
+ * 
+ * + *

Supported Operations:

+ *
    + *
  • LOAD - Load document attributes from source documents
  • + *
  • GROUPBY - Group results by one or more properties with reducers
  • + *
  • SORTBY - Sort results by properties with ASC/DESC directions
  • + *
  • APPLY - Apply mathematical expressions to create computed fields
  • + *
  • FILTER - Filter results using predicate expressions
  • + *
  • LIMIT - Limit and paginate results
  • + *
  • WITHCURSOR - Use cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Attributes used in GROUPBY and SORTBY should be stored as SORTABLE for optimal performance
  • + *
  • LOAD operations can hurt performance as they require HMGET operations on each record
  • + *
  • Use SORTBY with MAX for efficient top-N queries
  • + *
  • Consider using WITHCURSOR for large result sets to avoid memory issues
  • + *
+ * + * @param Key type. + * @param Value type. + * @since 6.8 + * @author Tihomir Mateev + * @see FT.AGGREGATE + * @see Redis + * Aggregations Guide + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class AggregateArgs { + + private Optional verbatim = Optional.empty(); + + private final List> loadFields = new ArrayList<>(); + + private Optional timeout = Optional.empty(); + + private final List> groupByList = new ArrayList<>(); + + private final List> sortByList = new ArrayList<>(); + + private final List> applyList = new ArrayList<>(); + + private Optional limit = Optional.empty(); + + private final List filters = new ArrayList<>(); + + private Optional withCursor = Optional.empty(); + + private final Map params = new HashMap<>(); + + private Optional scorer = Optional.empty(); + + private Optional addScores = Optional.empty(); + + private QueryDialects dialect = QueryDialects.DIALECT2; + + /** + * Creates a new {@link AggregateArgs} instance. + * + * @param Key type. + * @param Value type. + * @return new instance of {@link AggregateArgs}. + */ + public static Builder builder() { + return new Builder<>(); + } + + /** + * Builder for {@link AggregateArgs}. + * + * @param Key type. + * @param Value type. + */ + public static class Builder { + + private final AggregateArgs args = new AggregateArgs<>(); + + /** + * Set VERBATIM flag - do not try to use stemming for query expansion. + * + *

+ * When set, the query terms are searched verbatim without attempting to use stemming for query expansion. This is + * useful when you want exact matches for your search terms. + *

+ * + * @return the builder. + */ + public Builder verbatim() { + args.verbatim = Optional.of(true); + return this; + } + + /** + * Load document attributes from the source document. + * + *

+ * Loads the specified field from the source document. For hash documents, this is the field name. For JSON documents, + * this can be a JSONPath expression. + *

+ * + *

+ * Performance Note: LOAD operations can significantly hurt performance as they require HMGET + * operations on each processed record. Consider storing frequently accessed attributes as SORTABLE for better + * performance. + *

+ * + * @param field the field identifier (field name for hashes, JSONPath for JSON) + * @return the builder. + */ + public Builder load(K field) { + args.loadFields.add(new LoadField<>(field, null)); + return this; + } + + /** + * Load document attributes from the source document with alias. + * + *

+ * Loads the specified field from the source document and assigns it an alias name for use in the aggregation pipeline. + * The alias can be referenced in subsequent GROUPBY, SORTBY, APPLY, and FILTER operations. + *

+ * + * @param field the field identifier (field name for hashes, JSONPath for JSON) + * @param alias the alias name to use in the result + * @return the builder. + */ + public Builder load(K field, K alias) { + args.loadFields.add(new LoadField<>(field, alias)); + return this; + } + + /** + * Load all document attributes. + * + *

+ * Equivalent to using {@code LOAD *} in the Redis command. This loads all attributes from the source documents. Use + * with caution as this can significantly impact performance when dealing with large documents or many results. + *

+ * + * @return the builder. + */ + public Builder loadAll() { + args.loadFields.add(new LoadField<>(null, null)); // Special case for * + return this; + } + + /** + * Set timeout for the aggregate operation. + * + * @param timeout the timeout duration + * @return the builder. + */ + public Builder timeout(Duration timeout) { + args.timeout = Optional.of(timeout); + return this; + } + + /** + * Add a GROUPBY clause. + * + * @param groupBy the group by specification + * @return the builder. + */ + public Builder groupBy(GroupBy groupBy) { + args.groupByList.add(groupBy); + return this; + } + + /** + * Add a SORTBY clause. + * + * @param sortBy the sort by specification + * @return the builder. + */ + public Builder sortBy(SortBy sortBy) { + args.sortByList.add(sortBy); + return this; + } + + /** + * Add an APPLY clause. + * + * @param apply the apply specification + * @return the builder. + */ + public Builder apply(Apply apply) { + args.applyList.add(apply); + return this; + } + + /** + * Set LIMIT clause for pagination. + * + *

+ * Limits the number of results to return just {@code num} results starting at index {@code offset} (zero-based). This + * is useful for pagination of results. + *

+ * + *

+ * Performance Note: It is much more efficient to use {@code SORTBY ... MAX} if you are only interested + * in limiting the output of a sort operation. Use LIMIT for pagination or when you need results without sorting. + *

+ * + *

Example:

+ * + *
+         * {@code
+         * // Get results 50-100 of the top 100 results efficiently
+         * .sortBy("score", SortDirection.DESC).max(100)
+         * .limit(50, 50)
+         * }
+         * 
+ * + * @param offset the zero-based starting index + * @param num the maximum number of results to return + * @return the builder. + */ + public Builder limit(long offset, long num) { + args.limit = Optional.of(new Limit(offset, num)); + return this; + } + + /** + * Add a FILTER clause for post-aggregation filtering. + * + *

+ * Filters the results using predicate expressions relating to values in each result. Filters are applied after the + * query and relate to the current state of the pipeline. This allows filtering on computed fields created by APPLY + * operations or reducer results. + *

+ * + *

Example Usage:

+ * + *
+         * {@code
+         * // Filter by numeric comparison
+         * .filter("@price > 100")
+         *
+         * // Filter by computed field
+         * .apply("@price * @quantity", "total_value")
+         * .filter("@total_value > 1000")
+         *
+         * // Filter by reducer result
+         * .groupBy("category").reduce(Reducer.count().as("count"))
+         * .filter("@count >= 5")
+         * }
+         * 
+ * + * @param filter the filter expression (e.g., "@price > 100", "@category == 'electronics'") + * @return the builder. + */ + public Builder filter(V filter) { + args.filters.add(filter); + return this; + } + + /** + * Set WITHCURSOR clause for cursor-based pagination. + * + *

+ * Enables cursor-based pagination as a quicker alternative to LIMIT for scanning through large result sets. This is + * particularly useful when you need to process all results but want to avoid memory issues with very large datasets. + *

+ * + *

Example Usage:

+ * + *
+         * {@code
+         * // Basic cursor with read size
+         * .withCursor(WithCursor.of(1000L))
+         *
+         * // Cursor with read size and idle timeout
+         * .withCursor(WithCursor.of(1000L, Duration.ofMinutes(5)))
+         * }
+         * 
+ * + *

+ * Use {@link io.lettuce.core.search.commands.FT#CURSOR_READ} and {@link io.lettuce.core.search.commands.FT#CURSOR_DEL} + * to iterate through and manage the cursor. + *

+ * + * @param withCursor the cursor specification with count and optional idle timeout + * @return the builder. + */ + public Builder withCursor(WithCursor withCursor) { + args.withCursor = Optional.of(withCursor); + return this; + } + + /** + * Add a parameter for parameterized queries. + * + *

+ * Defines a value parameter that can be referenced in the query using {@code $name}. Each parameter reference in the + * search query is substituted by the corresponding parameter value. This is useful for dynamic queries and prevents + * injection attacks. + *

+ * + *

+ * Note: To use PARAMS, set DIALECT to 2 or greater. + *

+ * + *

Example Usage:

+ * + *
+         * {@code
+         * // Define parameters
+         * AggregateArgs.builder()
+         *     .param("category", "electronics")
+         *     .param("min_price", "100")
+         *     .dialect(QueryDialects.DIALECT2)
+         *     .build();
+         *
+         * // Use in query: "@category:$category @price:[$min_price +inf]"
+         * }
+         * 
+ * + * @param name the parameter name (referenced as $name in query) + * @param value the parameter value + * @return the builder. + */ + public Builder param(K name, V value) { + args.params.put(name, value); + return this; + } + + /** + * Set SCORER clause. + * + * @param scorer the scorer function + * @return the builder. + */ + public Builder scorer(V scorer) { + args.scorer = Optional.of(scorer); + return this; + } + + /** + * Set ADDSCORES flag to expose full-text search scores. + * + *

+ * The ADDSCORES option exposes the full-text score values to the aggregation pipeline. You can then use + * {@code @__score} in subsequent pipeline operations like SORTBY, APPLY, FILTER, and GROUPBY. + *

+ * + *

Example Usage:

+ * + *
+         * {@code
+         * // Sort by search relevance score
+         * AggregateArgs.builder()
+         *     .addScores()
+         *     .sortBy("__score", SortDirection.DESC)
+         *     .build();
+         *
+         * // Filter by minimum score threshold
+         * AggregateArgs.builder()
+         *     .addScores()
+         *     .filter("@__score > 0.5")
+         *     .build();
+         * }
+         * 
+ * + * @return the builder. + */ + public Builder addScores() { + args.addScores = Optional.of(true); + return this; + } + + /** + * Set the query dialect. + * + * @param dialect the query dialect + * @return the builder. + */ + public Builder dialect(QueryDialects dialect) { + args.dialect = dialect; + return this; + } + + /** + * Convenience method to add a GROUPBY clause with properties. + * + * @param properties the properties to group by + * @return the builder. + */ + @SafeVarargs + public final Builder groupBy(K... properties) { + return groupBy(new GroupBy<>(Arrays.asList(properties))); + } + + /** + * Convenience method to add a SORTBY clause with a single property. + * + * @param property the property to sort by + * @param direction the sort direction + * @return the builder. + */ + public Builder sortBy(K property, SortDirection direction) { + return sortBy(new SortBy<>(Collections.singletonList(new SortProperty<>(property, direction)))); + } + + /** + * Convenience method to add an APPLY clause. + * + * @param expression the expression to apply + * @param name the result field name + * @return the builder. + */ + public Builder apply(V expression, K name) { + return apply(new Apply<>(expression, name)); + } + + /** + * Build the {@link AggregateArgs}. + * + * @return the built {@link AggregateArgs}. + */ + public AggregateArgs build() { + return args; + } + + } + + /** + * Build a {@link CommandArgs} object that contains all the arguments. + * + * @param args the {@link CommandArgs} object + */ + public void build(CommandArgs args) { + verbatim.ifPresent(v -> args.add(CommandKeyword.VERBATIM)); + + if (!loadFields.isEmpty()) { + args.add(CommandKeyword.LOAD); + if (loadFields.size() == 1 && loadFields.get(0).field == null) { + // LOAD * + args.add("*"); + } else { + // Count the total number of arguments (field + optional AS + alias) + int argCount = 0; + for (LoadField loadField : loadFields) { + argCount++; // field + if (loadField.alias != null) { + argCount += 2; // AS + alias + } + } + args.add(argCount); + for (LoadField loadField : loadFields) { + args.addKey(loadField.field); + if (loadField.alias != null) { + args.add(CommandKeyword.AS); + args.addKey(loadField.alias); + } + } + } + } + + timeout.ifPresent(t -> { + args.add(CommandKeyword.TIMEOUT); + args.add(t.toMillis()); + }); + + // Add GROUPBY clauses + for (GroupBy groupBy : groupByList) { + groupBy.build(args); + } + + // Add SORTBY clauses + for (SortBy sortBy : sortByList) { + sortBy.build(args); + } + + // Add APPLY clauses + for (Apply apply : applyList) { + apply.build(args); + } + + // Add LIMIT clause + limit.ifPresent(l -> { + args.add(CommandKeyword.LIMIT); + args.add(l.offset); + args.add(l.num); + }); + + // Add FILTER clauses + for (V filter : filters) { + args.add(CommandKeyword.FILTER); + args.addValue(filter); + } + + // Add WITHCURSOR clause + withCursor.ifPresent(wc -> { + args.add(CommandKeyword.WITHCURSOR); + wc.count.ifPresent(c -> { + args.add(CommandKeyword.COUNT); + args.add(c); + }); + wc.maxIdle.ifPresent(mi -> { + args.add(CommandKeyword.MAXIDLE); + args.add(mi.toMillis()); + }); + }); + + if (!params.isEmpty()) { + args.add(CommandKeyword.PARAMS); + args.add(params.size() * 2L); + params.forEach((key, value) -> { + args.addKey(key); + args.addValue(value); + }); + } + + scorer.ifPresent(s -> { + args.add(CommandKeyword.SCORER); + args.addValue(s); + }); + + addScores.ifPresent(v -> args.add(CommandKeyword.ADDSCORES)); + + args.add(CommandKeyword.DIALECT); + args.add(dialect.toString()); + } + + // Helper classes + public static class LoadField { + + final K field; + + final K alias; + + LoadField(K field, K alias) { + this.field = field; + this.alias = alias; + } + + } + + public static class Limit { + + final long offset; + + final long num; + + Limit(long offset, long num) { + this.offset = offset; + this.num = num; + } + + } + + public static class WithCursor { + + final Optional count; + + final Optional maxIdle; + + public WithCursor(Long count, Optional maxIdle) { + this.count = Optional.ofNullable(count); + this.maxIdle = maxIdle; + } + + /** + * Static factory method to create an WithCursor instance with a single name and expression pair. + * + * @param count the name of the expression + * @param maxIdle the expression to apply + * @return new Apply instance + */ + public static WithCursor of(Long count, Duration maxIdle) { + return new WithCursor(count, Optional.of(maxIdle)); + } + + /** + * Static factory method to create an WithCursor instance with a single name and expression pair. + * + * @param count the name of the expression + * @return new Apply instance + */ + public static WithCursor of(Long count) { + return new WithCursor(count, Optional.empty()); + } + + } + + /** + * Represents a GROUPBY clause in an aggregation pipeline. + * + *

+ * Groups the results in the pipeline based on one or more properties. Each group should have at least one reducer function + * that handles the group entries, either counting them or performing multiple aggregate operations. + *

+ * + *

Example Usage:

+ * + *
+     *
+     * {
+     *     @code
+     *     // Group by category and count items
+     *     GroupBy groupBy = GroupBy.of("category").reduce(Reducer.count().as("item_count"));
+     *
+     *     // Group by multiple fields with multiple reducers
+     *     GroupBy complexGroup = GroupBy.of("category", "brand").reduce(Reducer.count().as("count"))
+     *             .reduce(Reducer.avg("@price").as("avg_price")).reduce(Reducer.sum("@quantity").as("total_quantity"));
+     * }
+     * 
+ * + *

Supported Reducers:

+ *
    + *
  • COUNT - Count the number of records in each group
  • + *
  • SUM - Sum numeric values within each group
  • + *
  • AVG - Calculate average of numeric values
  • + *
  • MIN/MAX - Find minimum/maximum values
  • + *
  • COUNT_DISTINCT - Count distinct values
  • + *
+ * + *

+ * Performance Note: Properties used in GROUPBY should be stored as SORTABLE in the index for optimal + * performance. + *

+ */ + public static class GroupBy { + + private final List properties; + + private final List> reducers; + + public GroupBy(List properties) { + this.properties = new ArrayList<>(properties); + this.reducers = new ArrayList<>(); + } + + public GroupBy reduce(Reducer reducer) { + this.reducers.add(reducer); + return this; + } + + /** + * Static factory method to create a GroupBy instance. + * + * @param properties the properties to group by + * @param Key type + * @param Value type + * @return new GroupBy instance + */ + @SafeVarargs + public static GroupBy of(K... properties) { + return new GroupBy<>(Arrays.asList(properties)); + } + + public void build(CommandArgs args) { + args.add(CommandKeyword.GROUPBY); + args.add(properties.size()); + for (K property : properties) { + // Add @ prefix if not already present + String propertyStr = property.toString(); + if (!propertyStr.startsWith("@")) { + args.add("@" + propertyStr); + } else { + args.addKey(property); + } + } + + for (Reducer reducer : reducers) { + reducer.build(args); + } + } + + } + + /** + * Represents a SORTBY clause in an aggregation pipeline. + * + *

+ * Sorts the pipeline results up until the point of SORTBY, using a list of properties. By default, sorting is ascending, + * but ASC or DESC can be specified for each property. + *

+ * + *

Example Usage:

+ * + *
+     *
+     * {
+     *     @code
+     *     // Simple sort by single field
+     *     SortBy sortBy = SortBy.of("price", SortDirection.DESC);
+     *
+     *     // Sort with MAX optimization for top-N queries
+     *     SortBy topN = SortBy.of("score", SortDirection.DESC).max(100) // Only sort top 100 results
+     *             .withCount(); // Include accurate count
+     *
+     *     // Multiple sort criteria
+     *     SortBy multiSort = SortBy.of(new SortProperty<>("category", SortDirection.ASC),
+     *             new SortProperty<>("price", SortDirection.DESC));
+     * }
+     * 
+ * + *

Performance Optimizations:

+ *
    + *
  • MAX - Optimizes sorting by only processing the top N results
  • + *
  • WITHCOUNT - Returns accurate counts but processes all results
  • + *
  • SORTABLE fields - Use SORTABLE attribute in index for best performance
  • + *
+ * + *

+ * Performance Note: Use {@code max()} for efficient top-N queries instead of sorting all results and then + * using LIMIT. + *

+ */ + public static class SortBy { + + private final List> properties; + + private Optional max = Optional.empty(); + + private boolean withCount = false; + + public SortBy(List> properties) { + this.properties = new ArrayList<>(properties); + } + + public SortBy max(long max) { + this.max = Optional.of(max); + return this; + } + + public SortBy withCount() { + this.withCount = true; + return this; + } + + /** + * Static factory method to create a SortBy instance with a single property. + * + * @param property the property to sort by + * @param direction the sort direction + * @param Key type + * @return new SortBy instance + */ + public static SortBy of(K property, SortDirection direction) { + return new SortBy<>(Collections.singletonList(new SortProperty<>(property, direction))); + } + + /** + * Static factory method to create a SortBy instance with multiple properties. + * + * @param properties the properties to sort by + * @param Key type + * @return new SortBy instance + */ + @SafeVarargs + public static SortBy of(SortProperty... properties) { + return new SortBy<>(Arrays.asList(properties)); + } + + public void build(CommandArgs args) { + args.add(CommandKeyword.SORTBY); + // Count includes property + direction pairs + args.add(properties.size() * 2L); + for (SortProperty property : properties) { + // Add @ prefix if not already present + String propertyStr = property.property.toString(); + if (!propertyStr.startsWith("@")) { + args.add("@" + propertyStr); + } else { + args.addKey(property.property); + } + args.add(property.direction.name()); + } + + max.ifPresent(m -> { + args.add(CommandKeyword.MAX); + args.add(m); + }); + + if (withCount) { + args.add(CommandKeyword.WITHCOUNT); + } + } + + } + + /** + * Represents an APPLY clause in an aggregation pipeline. + * + *

+ * Applies a 1-to-1 transformation on one or more properties and either stores the result as a new property down the + * pipeline or replaces any property using this transformation. APPLY can perform arithmetic operations on numeric + * properties or apply functions depending on property types. + *

+ * + *

Example Usage:

+ * + *
+     * 
+     * {
+     *     @code
+     *     // Calculate total value from price and quantity
+     *     Apply totalValue = new Apply<>("@price * @quantity", "total_value");
+     *
+     *     // Mathematical operations
+     *     Apply discount = new Apply<>("@price * 0.9", "discounted_price");
+     *
+     *     // String operations
+     *     Apply fullName = new Apply<>("@first_name + ' ' + @last_name", "full_name");
+     *
+     *     // Date operations
+     *     Apply dayOfWeek = new Apply<>("day(@timestamp)", "day");
+     * }
+     * 
+ * + *

Supported Operations:

+ *
    + *
  • Arithmetic: +, -, *, /, %, ^
  • + *
  • Mathematical functions: sqrt(), log(), abs(), ceil(), floor()
  • + *
  • String functions: upper(), lower(), substr()
  • + *
  • Date functions: day(), hour(), minute(), month(), year()
  • + *
  • Geo functions: geodistance()
  • + *
+ * + *

+ * The expression is evaluated dynamically for each record in the pipeline and the result is stored as a new property that + * can be referenced by further operations. + *

+ */ + public static class Apply { + + private final V expression; + + private final K name; + + public Apply(V expression, K name) { + this.expression = expression; + this.name = name; + } + + public void build(CommandArgs args) { + args.add(CommandKeyword.APPLY); + args.addValue(expression); + args.add(CommandKeyword.AS); + args.addKey(name); + } + + /** + * Static factory method to create an Apply instance with a single name and expression pair. + * + * @param name the name of the expression + * @param expression the expression to apply + * @param Key type + * @param Value type + * @return new Apply instance + */ + public static Apply of(V expression, K name) { + return new Apply<>(expression, name); + } + + } + + /** + * Represents a REDUCE function in a GROUPBY clause. + * + *

+ * Reducers handle group entries in a GROUPBY operation, performing aggregate operations like counting, summing, averaging, + * or finding min/max values. Each reducer can have an optional alias using the AS keyword. + *

+ * + *

Example Usage:

+ * + *
+     * 
+     * {
+     *     @code
+     *     // Count items in each group
+     *     Reducer count = Reducer.count().as("item_count");
+     *
+     *     // Sum numeric values
+     *     Reducer totalSales = Reducer.sum("@sales").as("total_sales");
+     *
+     *     // Calculate average
+     *     Reducer avgPrice = Reducer.avg("@price").as("average_price");
+     *
+     *     // Find extremes
+     *     Reducer maxScore = Reducer.max("@score").as("highest_score");
+     *     Reducer minPrice = Reducer.min("@price").as("lowest_price");
+     *
+     *     // Count distinct values
+     *     Reducer uniqueUsers = Reducer.countDistinct("@user_id").as("unique_users");
+     * }
+     * 
+ * + *

Available Reducer Functions:

+ *
    + *
  • COUNT - Count the number of records in the group
  • + *
  • SUM - Sum all numeric values of a field
  • + *
  • AVG - Calculate the average of numeric values
  • + *
  • MIN - Find the minimum value
  • + *
  • MAX - Find the maximum value
  • + *
  • COUNT_DISTINCT - Count unique values of a field
  • + *
+ * + *

+ * If no alias is provided using {@code as()}, the resulting field name will be the function name combined with the field + * name (e.g., "count_distinct(@user_id)"). + *

+ */ + public static class Reducer { + + private final String function; + + private final List args; + + private Optional alias = Optional.empty(); + + public Reducer(String function, List args) { + this.function = function; + this.args = new ArrayList<>(args); + } + + public Reducer as(K alias) { + this.alias = Optional.of(alias); + return this; + } + + /** + * Static factory method to create a COUNT reducer. + * + * @param Key type + * @param Value type + * @return new COUNT Reducer instance + */ + public static Reducer count() { + return new Reducer<>("COUNT", Collections.emptyList()); + } + + /** + * Static factory method to create a SUM reducer. + * + * @param field the field to sum + * @param Key type + * @param Value type + * @return new SUM Reducer instance + */ + public static Reducer sum(V field) { + return new Reducer<>("SUM", Collections.singletonList(field)); + } + + /** + * Static factory method to create an AVG reducer. + * + * @param field the field to average + * @param Key type + * @param Value type + * @return new AVG Reducer instance + */ + public static Reducer avg(V field) { + return new Reducer<>("AVG", Collections.singletonList(field)); + } + + /** + * Static factory method to create a MIN reducer. + * + * @param field the field to find minimum + * @param Key type + * @param Value type + * @return new MIN Reducer instance + */ + public static Reducer min(V field) { + return new Reducer<>("MIN", Collections.singletonList(field)); + } + + /** + * Static factory method to create a MAX reducer. + * + * @param field the field to find maximum + * @param Key type + * @param Value type + * @return new MAX Reducer instance + */ + public static Reducer max(V field) { + return new Reducer<>("MAX", Collections.singletonList(field)); + } + + /** + * Static factory method to create a COUNT_DISTINCT reducer. + * + * @param field the field to count distinct values + * @param Key type + * @param Value type + * @return new COUNT_DISTINCT Reducer instance + */ + public static Reducer countDistinct(V field) { + return new Reducer<>("COUNT_DISTINCT", Collections.singletonList(field)); + } + + public void build(CommandArgs args) { + args.add(CommandKeyword.REDUCE); + args.add(function); + args.add(this.args.size()); + for (V arg : this.args) { + args.addValue(arg); + } + + alias.ifPresent(a -> { + args.add(CommandKeyword.AS); + args.addKey(a); + }); + } + + } + + /** + * Represents a sort property with direction. + */ + public static class SortProperty { + + final K property; + + final SortDirection direction; + + public SortProperty(K property, SortDirection direction) { + this.property = property; + this.direction = direction; + } + + } + + /** + * Sort direction enumeration. + */ + public enum SortDirection { + ASC, DESC + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/CreateArgs.java b/src/main/java/io/lettuce/core/search/arguments/CreateArgs.java new file mode 100644 index 000000000..94240c1f0 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/CreateArgs.java @@ -0,0 +1,537 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.OptionalDouble; +import java.util.OptionalLong; + +import static io.lettuce.core.protocol.CommandKeyword.*; + +/** + * Argument list builder for {@code FT.CREATE}. + * + * @param Key type. + * @param Value type. + * @see FT.CREATE + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class CreateArgs { + + /** + * Possible target types for the index. + */ + public enum TargetType { + HASH, JSON + } + + private Optional on = Optional.of(TargetType.HASH); + + private final List prefixes = new ArrayList<>(); + + private Optional filter = Optional.empty(); + + private Optional defaultLanguage = Optional.empty(); + + private Optional languageField = Optional.empty(); + + private OptionalDouble defaultScore = OptionalDouble.empty(); + + private Optional scoreField = Optional.empty(); + + private Optional payloadField = Optional.empty(); + + private boolean maxTextFields; + + private OptionalLong temporary = OptionalLong.empty(); + + private boolean noOffsets; + + private boolean noHighlight; + + private boolean noFields; + + private boolean noFrequency; + + private boolean skipInitialScan; + + private Optional> stopWords = Optional.empty(); + + /** + * Used to build a new instance of the {@link CreateArgs}. + * + * @return a {@link Builder} that provides the option to build up a new instance of the {@link CreateArgs} + * @param the key type + * @param the value type + */ + public static Builder builder() { + return new Builder<>(); + } + + /** + * Builder for {@link CreateArgs}. + *

+ * As a final step the {@link Builder#build()} method needs to be executed to create the final {@link CreateArgs} instance. + * + * @param the key type + * @param the value type + * @see FT.CREATE + */ + public static class Builder { + + private final CreateArgs instance = new CreateArgs<>(); + + /** + * Set the {@link TargetType} type for the index. Defaults to {@link TargetType#HASH}. + * + * @param targetType the target type + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder on(TargetType targetType) { + instance.on = Optional.of(targetType); + return this; + } + + /** + * Add a prefix to the index. You can add several prefixes to index. Default setting is * (all keys). + * + * @param prefix the prefix + * @return the instance of the current {@link Builder} for the purpose of method chaining + * @see {@link Builder#addPrefixes(List)} + */ + public Builder addPrefix(K prefix) { + instance.prefixes.add(prefix); + return this; + } + + /** + * Add a list of prefixes to the index. You can add several prefixes to index. Default setting is * (all keys). + * + * @param prefixes a {@link List} of prefixes + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder addPrefixes(List prefixes) { + instance.prefixes.addAll(prefixes); + return this; + } + + /** + * Set a filter for the index. Default setting is to have no filter. + *

+ * It is possible to use @__key to access the key that was just added/changed. A field can be used to set field name by + * passing 'FILTER @indexName=="myindexname"'. + * + * @param filter a filter expression with the full RediSearch aggregation expression language + * @return the instance of the current {@link Builder} for the purpose of method chaining + * @see RediSearch Query + */ + public Builder filter(V filter) { + instance.filter = Optional.of(filter); + return this; + } + + /** + * Set the default language for the documents in the index. The default setting is English. + * + * @param language the default language + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder defaultLanguage(DocumentLanguage language) { + instance.defaultLanguage = Optional.of(language); + return this; + } + + /** + * Set the field that contains the language setting for the documents in the index. The default setting is to have no + * language field. + * + * @param field the language field + * @return the instance of the current {@link Builder} for the purpose of method chaining + * @see Stemming + */ + public Builder languageField(K field) { + instance.languageField = Optional.of(field); + return this; + } + + /** + * Set the default score for the documents in the index. The default setting is 1.0. + * + * @param score the default score + * @return the instance of the current {@link Builder} for the purpose of method chaining + * @see Scoring + */ + public Builder defaultScore(double score) { + instance.defaultScore = OptionalDouble.of(score); + return this; + } + + /** + * Set the field that contains the score setting for the documents in the index. The default setting is a score of 1.0. + * + * @param field the score field + * @return the instance of the current {@link Builder} for the purpose of method chaining + * @see Scoring + */ + public Builder scoreField(K field) { + instance.scoreField = Optional.of(field); + return this; + } + + /** + * Set the field that contains the payload setting for the documents in the index. The default setting is to have no + * payload field. + *

+ * This should be a document attribute that you use as a binary safe payload string to the document that can be + * evaluated at query time by a custom scoring function or retrieved to the client + * + * @param field the payload field + * @return the instance of the current {@link Builder} for the purpose of method chaining + * @see Scoring + */ + public Builder payloadField(K field) { + instance.payloadField = Optional.of(field); + return this; + } + + /** + * Set the maximum number of text fields in the index. The default setting is to have no limit. + *

+ * Forces RediSearch to encode indexes as if there were more than 32 text attributes, which allows you to add additional + * attributes (beyond 32) using FT.ALTER. For efficiency, RediSearch encodes indexes differently if they are created + * with less than 32 text attributes. + * + * @param maxTextFields the maximum number of text fields + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder maxTextFields(boolean maxTextFields) { + instance.maxTextFields = maxTextFields; + return this; + } + + /** + * Set the temporary index expiration time in seconds. The default setting is to have no expiration time. + *

+ * Creates a lightweight temporary index that expires after a specified period of inactivity, in seconds. The internal + * idle timer is reset whenever the index is searched or added to. Because such indexes are lightweight, you can create + * thousands of such indexes without negative performance implications and, therefore, you should consider using + * {@link Builder#skipInitialScan(boolean)} to avoid costly scanning. + *

+ * Warning: When temporary indexes expire, they drop all the records associated with them. FT.DROPINDEX was introduced + * with a default of not deleting docs and a DD flag that enforced deletion. However, for temporary indexes, documents + * are deleted along with the index. Historically, RediSearch used an FT.ADD command, which made a connection between + * the document and the index. Then, FT.DROP, also a hystoric command, deleted documents by default. In version 2.x, + * RediSearch indexes hashes and JSONs, and the dependency between the index and documents no longer exists. + * + * @param seconds the temporary index expiration time in seconds + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder temporary(long seconds) { + instance.temporary = OptionalLong.of(seconds); + return this; + } + + /** + * Set the no offsets flag. The default setting is to have offsets. + *

+ * It saves memory, but does not allow exact searches or highlighting. It implies + * {@link Builder#noHighlighting(boolean)} is set to true. + * + * @param noOffsets the no offsets flag + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder noOffsets(boolean noOffsets) { + instance.noOffsets = noOffsets; + return this; + } + + /** + * Set the no highlighting flag. The default setting is to have highlighting. + *

+ * Conserves storage space and memory by disabling highlighting support. If set, the corresponding byte offsets for term + * positions are not stored. NOHL is also implied by NOOFFSETS. + * + * @param noHL the no highlighting flag + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder noHighlighting(boolean noHL) { + instance.noHighlight = noHL; + return this; + } + + /** + * Set the no fields flag. The default setting is to have fields. + *

+ * Does not store attribute bits for each term. It saves memory, but it does not allow filtering by specific attributes. + * + * @param noFields the no fields flag + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder noFields(boolean noFields) { + instance.noFields = noFields; + return this; + } + + /** + * Set the no frequency flag. The default setting is to have frequencies. + *

+ * Does not store the frequency of each term. It saves memory, but it does not allow sorting by frequency of a given + * term. + * + * @param noFreqs the no frequency flag + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder noFrequency(boolean noFreqs) { + instance.noFrequency = noFreqs; + return this; + } + + /** + * Set the skip initial scan flag. The default setting is to scan initially. + * + * @param skipInitialScan the skip initial scan flag + * @return the instance of the current {@link Builder} for the purpose of method chaining + */ + public Builder skipInitialScan(boolean skipInitialScan) { + instance.skipInitialScan = skipInitialScan; + return this; + } + + /** + * Set the index with a custom stopword list, to be ignored during indexing and search time. + *

+ * If not set, FT.CREATE takes the default list of stopwords. If {count} is set to 0, the index does not have stopwords. + * + * @param stopWords a list of stop words + * @return the instance of the current {@link Builder} for the purpose of method chaining + * @see Stop + * words + */ + public Builder stopWords(List stopWords) { + instance.stopWords = Optional.of(stopWords); + return this; + } + + public CreateArgs build() { + return instance; + } + + } + + /** + * Get the target type for the index. + * + * @return the target type + * @see TargetType + * @see Builder#on(TargetType) + */ + public Optional getOn() { + return on; + } + + /** + * Get the prefixes for the index. + * + * @return the prefixes + * @see Builder#addPrefix(Object) + * @see Builder#addPrefixes(List) + */ + public List getPrefixes() { + return prefixes; + } + + /** + * Get the filter for the index. + * + * @return the filter + * @see Builder#filter(Object) + */ + public Optional getFilter() { + return filter; + } + + /** + * Get the default language for the documents in the index. + * + * @return the default language + * @see Builder#defaultLanguage(DocumentLanguage) + */ + public Optional getDefaultLanguage() { + return defaultLanguage; + } + + /** + * Get the field that contains the language setting for the documents in the index. + * + * @return the language field + * @see Builder#languageField(Object) + */ + public Optional getLanguageField() { + return languageField; + } + + /** + * Get the default score for the documents in the index. + * + * @return the default score + * @see Builder#defaultScore(double) + */ + public OptionalDouble getDefaultScore() { + return defaultScore; + } + + /** + * Get the field that contains the score setting for the documents in the index. + * + * @return the score field + * @see Builder#scoreField(Object) + */ + public Optional getScoreField() { + return scoreField; + } + + /** + * Get the field that contains the payload setting for the documents in the index. + * + * @return the payload field + * @see Builder#payloadField(Object) + */ + public Optional getPayloadField() { + return payloadField; + } + + /** + * Get the maximum number of text fields in the index. + * + * @return the maximum number of text fields + * @see Builder#maxTextFields(boolean) + */ + public boolean isMaxTextFields() { + return maxTextFields; + } + + /** + * Get the temporary index expiration time in seconds. + * + * @return the temporary index expiration time in seconds + * @see Builder#temporary(long) + */ + public OptionalLong getTemporary() { + return temporary; + } + + /** + * Get the no offsets flag. + * + * @return the no offsets flag + * @see Builder#noOffsets(boolean) + */ + public boolean isNoOffsets() { + return noOffsets; + } + + /** + * Get the no highlighting flag. + * + * @return the no highlighting flag + * @see Builder#noHighlighting(boolean) + */ + public boolean isNoHighlight() { + return noHighlight; + } + + /** + * Get the no fields flag. + * + * @return the no fields flag + * @see Builder#noFields(boolean) + */ + public boolean isNoFields() { + return noFields; + } + + /** + * Get the no frequency flag. + * + * @return the no frequency flag + * @see Builder#noFrequency(boolean) + */ + public boolean isNoFrequency() { + return noFrequency; + } + + /** + * Get the skip initial scan flag. + * + * @return the skip initial scan flag + * @see Builder#skipInitialScan(boolean) + */ + public boolean isSkipInitialScan() { + return skipInitialScan; + } + + /** + * Get the stop words for the index. + * + * @return the stop words + * @see Builder#stopWords(List) + */ + public Optional> getStopWords() { + return stopWords; + } + + /** + * Build a {@link CommandArgs} object that contains all the arguments. + * + * @param args the {@link CommandArgs} object + */ + public void build(CommandArgs args) { + on.ifPresent(targetType -> args.add(ON).add(targetType.name())); + if (!prefixes.isEmpty()) { + args.add(PREFIX).add(prefixes.size()); + prefixes.forEach(args::addKey); + } + filter.ifPresent(filter -> args.add(FILTER).addValue(filter)); + defaultLanguage.ifPresent(language -> args.add(LANGUAGE).add(language.toString())); + languageField.ifPresent(field -> args.add(LANGUAGE_FIELD).addKey(field)); + defaultScore.ifPresent(score -> args.add(SCORE).add(score)); + scoreField.ifPresent(field -> args.add(SCORE_FIELD).addKey(field)); + payloadField.ifPresent(field -> args.add(PAYLOAD_FIELD).addKey(field)); + if (maxTextFields) { + args.add(MAXTEXTFIELDS); + } + temporary.ifPresent(seconds -> args.add(TEMPORARY).add(seconds)); + if (noOffsets) { + args.add(NOOFFSETS); + } + if (noHighlight) { + args.add(NOHL); + } + if (noFields) { + args.add(NOFIELDS); + } + if (noFrequency) { + args.add(NOFREQS); + } + if (skipInitialScan) { + args.add(SKIPINITIALSCAN); + } + stopWords.ifPresent(words -> { + args.add(STOPWORDS).add(words.size()); + words.forEach(args::addValue); + }); + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/DocumentLanguage.java b/src/main/java/io/lettuce/core/search/arguments/DocumentLanguage.java new file mode 100644 index 000000000..5bc2bf011 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/DocumentLanguage.java @@ -0,0 +1,145 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import java.util.Locale; + +/** + * Supported document languages. + * + * @since 6.8 + * @author Tihomir Mateev + * @see Stemming + */ +public enum DocumentLanguage { + + /** + * Arabic + */ + ARABIC("arabic", new Locale("ar")), + /** + * Armenian + */ + ARMENIAN("armenian", new Locale("hy")), + /** + * Danish + */ + DANISH("danish", new Locale("da")), + /** + * Dutch + */ + DUTCH("dutch", new Locale("nl")), + /** + * English + */ + ENGLISH("english", Locale.ENGLISH), + /** + * Finnish + */ + FINNISH("finnish", new Locale("fi")), + /** + * French + */ + FRENCH("french", Locale.FRENCH), + /** + * German + */ + GERMAN("german", Locale.GERMAN), + /** + * Hungarian + */ + HUNGARIAN("hungarian", new Locale("hu")), + /** + * Italian + */ + ITALIAN("italian", Locale.ITALIAN), + /** + * Norwegian + */ + NORWEGIAN("norwegian", new Locale("no")), + /** + * Portuguese + */ + PORTUGUESE("portuguese", new Locale("pt")), + /** + * Romanian + */ + ROMANIAN("romanian", new Locale("ro")), + /** + * Russian + */ + RUSSIAN("russian", new Locale("ru")), + /** + * Serbian + */ + SERBIAN("serbian", new Locale("sr")), + /** + * Spanish + */ + SPANISH("spanish", new Locale("es")), + /** + * Swedish + */ + SWEDISH("swedish", new Locale("sv")), + /** + * Tamil + */ + TAMIL("tamil", new Locale("ta")), + /** + * Turkish + */ + TURKISH("turkish", new Locale("tr")), + /** + * Yiddish + */ + YIDDISH("yiddish", new Locale("yi")), + /** + * Chinese + * + * @see Chinese + * support + */ + CHINESE("chinese", Locale.CHINESE); + + private final String language; + + private final Locale locale; + + DocumentLanguage(String language, Locale locale) { + this.language = language; + this.locale = locale; + } + + @Override + public String toString() { + return language; + } + + /** + * @return the {@link DocumentLanguage} as a {@link Locale} + */ + public Locale getLocale() { + return locale; + } + + /** + * Retrieve the {@link DocumentLanguage} for a given {@link Locale}. + * + * @param locale the locale + * @return the {@link DocumentLanguage} + */ + public static DocumentLanguage getLanguage(Locale locale) { + for (DocumentLanguage language : DocumentLanguage.values()) { + if (language.getLocale().getLanguage().equals(locale.getLanguage())) { + return language; + } + } + throw new UnsupportedOperationException("No language found for locale: " + locale); + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/FieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/FieldArgs.java new file mode 100644 index 000000000..86bc0dff8 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/FieldArgs.java @@ -0,0 +1,282 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +import java.util.Optional; + +import static io.lettuce.core.protocol.CommandKeyword.*; + +/** + * Base class for field arguments in a RediSearch index. + *

+ * This class contains common options shared by all field types. Specific field types should extend this class and add their + * type-specific options. + * + * @param Key type + * @see Field + * and type options + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public abstract class FieldArgs { + + // Common field properties + protected K name; + + protected Optional as = Optional.empty(); + + protected boolean sortable; + + protected boolean unNormalizedForm; + + protected boolean noIndex; + + protected boolean indexEmpty; + + protected boolean indexMissing; + + /** + * Returns the field type. Subclasses must implement this method. + * + * @return the field type + */ + public abstract String getFieldType(); + + /** + * Get the field name. + * + * @return the field name + */ + public K getName() { + return name; + } + + /** + * Get the field alias. + * + * @return the field alias + */ + public Optional getAs() { + return as; + } + + /** + * Check if the field is sortable. + * + * @return true if sortable + */ + public boolean isSortable() { + return sortable; + } + + /** + * Check if the field uses unnormalized form. + * + * @return true if unnormalized form + */ + public boolean isUnNormalizedForm() { + return unNormalizedForm; + } + + /** + * Check if the field is not indexed. + * + * @return true if not indexed + */ + public boolean isNoIndex() { + return noIndex; + } + + /** + * Check if the field indexes empty values. + * + * @return true if indexes empty values + */ + public boolean isIndexEmpty() { + return indexEmpty; + } + + /** + * Check if the field indexes missing values. + * + * @return true if indexes missing values + */ + public boolean isIndexMissing() { + return indexMissing; + } + + /** + * Build the field arguments into the command. + * + * @param args the command arguments to modify + */ + public final void build(CommandArgs args) { + args.addKey(name); + as.ifPresent(a -> args.add(AS).addKey(a)); + args.add(getFieldType()); + + // Add type-specific arguments + buildTypeSpecificArgs(args); + + // Add common arguments + if (sortable) { + args.add(SORTABLE); + if (unNormalizedForm) { + args.add(UNF); + } + } + if (noIndex) { + args.add(NOINDEX); + } + if (indexEmpty) { + args.add(INDEXEMPTY); + } + if (indexMissing) { + args.add(INDEXMISSING); + } + } + + /** + * Add type-specific arguments to the command. Subclasses should override this method to add their specific arguments. + * + * @param args the command arguments to modify + */ + protected abstract void buildTypeSpecificArgs(CommandArgs args); + + /** + * Base builder for field arguments. + * + * @param Key type + * @param The concrete field args type + * @param The concrete builder type + */ + public abstract static class Builder, B extends Builder> { + + protected final T instance; + + /** + * Constructor for subclasses. + * + * @param instance the field args instance to build + */ + protected Builder(T instance) { + this.instance = instance; + } + + /** + * Returns this builder instance for method chaining. + * + * @return this builder instance + */ + @SuppressWarnings("unchecked") + protected B self() { + return (B) this; + } + + /** + * The name of the field in a hash the index is going to be based on. + * + * @param name the name of the field + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public B name(K name) { + instance.name = name; + return self(); + } + + /** + * Defines the attribute associated to the identifier. For example, you can use this feature to alias a complex JSONPath + * expression with more memorable (and easier to type) name. + * + * @param as the field name to be used in queries + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public B as(K as) { + instance.as = Optional.of(as); + return self(); + } + + /** + * NUMERIC, TAG, TEXT, or GEO attributes can have an optional SORTABLE argument. As the user sorts the results by the + * value of this attribute, the results are available with very low latency. Default is false (not sortable). + *

+ * Note that this adds memory overhead, so consider not declaring it on large text attributes. You can sort an attribute + * without the SORTABLE option, but the latency is not as good as with SORTABLE. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public B sortable() { + instance.sortable = true; + return self(); + } + + /** + * By default, for hashes (not with JSON) SORTABLE applies normalization to the indexed value (characters set to + * lowercase, removal of diacritics). When using the unnormalized form (UNF), you can disable the normalization and keep + * the original form of the value. With JSON, UNF is implicit with SORTABLE (normalization is disabled). + *

+ * Default is false (normalized form). + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public B unNormalizedForm() { + instance.sortable = true; + instance.unNormalizedForm = true; + return self(); + } + + /** + * Attributes can have the NOINDEX option, which means they will not be indexed. This is useful in conjunction with + * {@link Builder#sortable()}, to create attributes whose update using PARTIAL will not cause full reindexing of the + * document. If an attribute has NOINDEX and doesn't have SORTABLE, it will just be ignored by the index. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public B noIndex() { + instance.noIndex = true; + return self(); + } + + /** + * For TEXT and TAG attributes, introduced in v2.10, allows you to index and search for empty strings. By default, empty + * strings are not indexed. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public B indexEmpty() { + instance.indexEmpty = true; + return self(); + } + + /** + * For all field types, introduced in v2.10, allows you to search for missing values, that is, documents that do not + * contain a specific field. Note the difference between a field with an empty value and a document with a missing + * value. By default, missing values are not indexed. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public B indexMissing() { + instance.indexMissing = true; + return self(); + } + + /** + * Build the field arguments. + * + * @return the field arguments instance + */ + public T build() { + return instance; + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/GeoFieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/GeoFieldArgs.java new file mode 100644 index 000000000..76eba78c2 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/GeoFieldArgs.java @@ -0,0 +1,74 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +/** + * Field arguments for GEO fields in a RediSearch index. + *

+ * Geo fields are used to store geographical coordinates such as longitude and latitude. They enable geospatial radius queries, + * which allow you to implement location-based search functionality in your applications such as finding nearby restaurants, + * stores, or any other points of interest. + * + * @param Key type + * @see Geo + * Fields + * @since 6.8 + * @author Tihomir Mateev + */ +public class GeoFieldArgs extends FieldArgs { + + /** + * Create a new {@link GeoFieldArgs} using the builder pattern. + * + * @param Key type + * @return a new {@link Builder} + */ + public static Builder builder() { + return new Builder<>(); + } + + @Override + public String getFieldType() { + return "GEO"; + } + + @Override + protected void buildTypeSpecificArgs(CommandArgs args) { + // Geo fields have no type-specific arguments beyond the common ones + } + + /** + * Builder for {@link GeoFieldArgs}. + * + * @param Key type + */ + public static class Builder extends FieldArgs.Builder, Builder> { + + public Builder() { + super(new GeoFieldArgs<>()); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/GeoshapeFieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/GeoshapeFieldArgs.java new file mode 100644 index 000000000..73e3c0580 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/GeoshapeFieldArgs.java @@ -0,0 +1,143 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +import java.util.Optional; + +import static io.lettuce.core.protocol.CommandKeyword.*; + +/** + * Field arguments for GEOSHAPE fields in a RediSearch index. + *

+ * Geoshape fields provide more advanced functionality than GEO fields. You can use them to represent locations as points but + * also to define shapes and query the interactions between points and shapes (for example, to find all points that are + * contained within an enclosing shape). You can also choose between geographical coordinates (on the surface of a sphere) or + * standard Cartesian coordinates. + * + * @param Key type + * @see Geoshape + * Fields + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class GeoshapeFieldArgs extends FieldArgs { + + /** + * Coordinate system for geoshape fields. + */ + public enum CoordinateSystem { + /** + * Cartesian (planar) coordinates. + */ + FLAT, + /** + * Spherical (geographical) coordinates. This is the default option. + */ + SPHERICAL + } + + private Optional coordinateSystem = Optional.empty(); + + /** + * Create a new {@link GeoshapeFieldArgs} using the builder pattern. + * + * @param Key type + * @return a new {@link Builder} + */ + public static Builder builder() { + return new Builder<>(); + } + + @Override + public String getFieldType() { + return "GEOSHAPE"; + } + + /** + * Get the coordinate system. + * + * @return the coordinate system + */ + public Optional getCoordinateSystem() { + return coordinateSystem; + } + + @Override + protected void buildTypeSpecificArgs(CommandArgs args) { + coordinateSystem.ifPresent(cs -> { + switch (cs) { + case FLAT: + args.add(FLAT); + break; + case SPHERICAL: + args.add(SPHERICAL); + break; + } + }); + } + + /** + * Builder for {@link GeoshapeFieldArgs}. + * + * @param Key type + */ + public static class Builder extends FieldArgs.Builder, Builder> { + + public Builder() { + super(new GeoshapeFieldArgs<>()); + } + + /** + * Set the coordinate system for the geoshape field. + * + * @param coordinateSystem the coordinate system + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder coordinateSystem(CoordinateSystem coordinateSystem) { + instance.coordinateSystem = Optional.of(coordinateSystem); + return self(); + } + + /** + * Use Cartesian (planar) coordinates. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder flat() { + return coordinateSystem(CoordinateSystem.FLAT); + } + + /** + * Use spherical (geographical) coordinates. This is the default option. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder spherical() { + return coordinateSystem(CoordinateSystem.SPHERICAL); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/HighlightArgs.java b/src/main/java/io/lettuce/core/search/arguments/HighlightArgs.java new file mode 100644 index 000000000..49eb3a45f --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/HighlightArgs.java @@ -0,0 +1,127 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; +import io.lettuce.core.protocol.CommandKeyword; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +/** + * Argument list builder for {@code HIGHLIGHT} clause. + * + * @param Key type. + * @param Value type. + * @see Highlighting + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class HighlightArgs { + + private final List fields = new ArrayList<>(); + + private Optional> tags = Optional.empty(); + + /** + * Used to build a new instance of the {@link HighlightArgs}. + * + * @return a {@link HighlightArgs.Builder} that provides the option to build up a new instance of the {@link SearchArgs} + * @param the key type + */ + public static HighlightArgs.Builder builder() { + return new HighlightArgs.Builder<>(); + } + + /** + * Builder for {@link HighlightArgs}. + *

+ * As a final step the {@link HighlightArgs.Builder#build()} method needs to be executed to create the final + * {@link SortByArgs} instance. + * + * @param the key type + * @see FT.CREATE + */ + public static class Builder { + + private final HighlightArgs highlightArgs = new HighlightArgs<>(); + + /** + * Add a field to highlight. If no FIELDS directive is passed, then all returned fields are highlighted. + * + * @param field the field to summarize + * @return the instance of the current {@link HighlightArgs.Builder} for the purpose of method chaining + */ + public HighlightArgs.Builder field(K field) { + highlightArgs.fields.add(field); + return this; + } + + /** + * Tags to surround the matched terms with. If no TAGS are specified, a built-in tag pair is prepended and appended to + * each matched term. + * + * @param startTag the string is prepended to each matched term + * @param endTag the string is appended to each matched term + * @return the instance of the current {@link HighlightArgs.Builder} for the purpose of method chaining + */ + public HighlightArgs.Builder tags(V startTag, V endTag) { + highlightArgs.tags = Optional.of(new Tags<>(startTag, endTag)); + return this; + } + + /** + * Build the {@link HighlightArgs}. + * + * @return the {@link HighlightArgs} + */ + public HighlightArgs build() { + return highlightArgs; + } + + } + + /** + * Build a {@link CommandArgs} object that contains all the arguments. + * + * @param args the {@link CommandArgs} object + */ + public void build(CommandArgs args) { + args.add(CommandKeyword.HIGHLIGHT); + + if (!fields.isEmpty()) { + args.add(CommandKeyword.FIELDS); + args.add(fields.size()); + args.addKeys(fields); + } + + tags.ifPresent(tags -> { + args.add(CommandKeyword.TAGS); + args.addValue(tags.startTag); + args.addValue(tags.endTag); + }); + + } + + static class Tags { + + private final V startTag; + + private final V endTag; + + Tags(V startTag, V endTag) { + this.startTag = startTag; + this.endTag = endTag; + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/NumericFieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/NumericFieldArgs.java new file mode 100644 index 000000000..304615d3e --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/NumericFieldArgs.java @@ -0,0 +1,75 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +/** + * Field arguments for NUMERIC fields in a RediSearch index. + *

+ * Numeric fields are used to store non-textual, countable values. They can hold integer or floating-point values. Numeric + * fields are sortable, meaning you can perform range-based queries and retrieve documents based on specific numeric conditions. + * For example, you can search for documents with a price between a certain range or retrieve documents with a specific rating + * value. + * + * @param Key type + * @see Numeric + * Fields + * @since 6.8 + * @author Tihomir Mateev + */ +public class NumericFieldArgs extends FieldArgs { + + /** + * Create a new {@link NumericFieldArgs} using the builder pattern. + * + * @param Key type + * @return a new {@link Builder} + */ + public static Builder builder() { + return new Builder<>(); + } + + @Override + public String getFieldType() { + return "NUMERIC"; + } + + @Override + protected void buildTypeSpecificArgs(CommandArgs args) { + // Numeric fields have no type-specific arguments beyond the common ones + } + + /** + * Builder for {@link NumericFieldArgs}. + * + * @param Key type + */ + public static class Builder extends FieldArgs.Builder, Builder> { + + public Builder() { + super(new NumericFieldArgs<>()); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/QueryDialects.java b/src/main/java/io/lettuce/core/search/arguments/QueryDialects.java new file mode 100644 index 000000000..0a480e785 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/QueryDialects.java @@ -0,0 +1,38 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +public enum QueryDialects { + + DIALECT1("1"), DIALECT2("2"), DIALECT3("3"), DIALECT4("4"); + + private final String dialect; + + QueryDialects(String dialect) { + this.dialect = dialect; + } + + @Override + public String toString() { + return dialect; + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/ScoringFunction.java b/src/main/java/io/lettuce/core/search/arguments/ScoringFunction.java new file mode 100644 index 000000000..bd8f90a0e --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/ScoringFunction.java @@ -0,0 +1,97 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +/** + * Scoring function for search queries. + *

+ * The scoring function determines how the relevance of a document is calculated. + *

+ * The default scoring function is {@link ScoringFunction#TF_IDF}. + * + * @see Scoring + * @since 6.8 + * @author Tihomir Mateev + */ +public enum ScoringFunction { + + /** + * Term Frequency - Inverse Document Frequency. + *

+ * This is the default setting. + * + * @see Wikipedia + */ + TF_IDF("TFIDF"), + + /** + * Term Frequency - Inverse Document Frequency with document normalization. + *

+ * Identical to the default TFIDF scorer, with one important distinction - term frequencies are normalized by the length of + * the document, expressed as the total number of terms. The length is weighted, so that if a document contains two terms, + * one in a field that has a weight 1 and one in a field with a weight of 5, the total frequency is 6, not 2. + * + * @see Wikipedia + */ + TF_IDF_NORMALIZED("TFIDF.DOCNORM"), + + /** + * A variation on the basic TFIDF scorer. The relevance score for each document is multiplied by the presumptive document + * score, and a penalty is applied based on slop as in TFIDF. + * + * @see Wikipedia + */ + BM25("BM25"), + + /** + * A simple scorer that sums up the frequencies of matched terms. In the case of union clauses, it will give the maximum + * value of those matches. No other penalties or factors are applied. + * + * @see DisMax + */ + DIS_MAX("DISMAX"), + + /** + * A scoring function that just returns the presumptive score of the document without applying any calculations to it. Since + * document scores can be updated, this can be useful if you'd like to use an external score and nothing further. + */ + DOCUMENT_SCORE("DOCSCORE"), + + /** + * Scoring by the inverse Hamming distance between the document's payload and the query payload is performed. Since the + * nearest neighbors are of interest, the inverse Hamming distance (1/(1+d)) is used so that a distance of 0 gives a perfect + * score of 1 and is the highest rank. + *

+ * This only works if: + *

    + *
  • The document has a payload. + *
  • + *
  • The query has a payload. + *
  • + *
  • Both are exactly the same length. + *
  • + *
+ * Payloads are binary-safe, and having payloads with a length that is a multiple of 64 bits yields slightly faster results. + *

+ * + * @see Wikipedia + */ + HAMMING_DISTANCE("HAMMING"); + + private final String name; + + ScoringFunction(String function) { + this.name = function; + } + + @Override + public String toString() { + return name; + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/SearchArgs.java b/src/main/java/io/lettuce/core/search/arguments/SearchArgs.java new file mode 100644 index 000000000..ac8ee74b2 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/SearchArgs.java @@ -0,0 +1,570 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; +import io.lettuce.core.protocol.CommandKeyword; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.OptionalLong; + +/** + * Argument list builder for {@code FT.SEARCH}. + * + * @param Key type. + * @param Value type. + * @since 6.8 + * @author Tihomir Mateev + * @see FT.SEARCH + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class SearchArgs { + + private Optional noContent = Optional.empty(); + + private Optional verbatim = Optional.empty(); + + private Optional noStopWords = Optional.empty(); + + private Optional withScores = Optional.empty(); + + private Optional withPayloads = Optional.empty(); + + private Optional withSortKeys = Optional.empty(); + + // FIXME verify if we need to support this, deprecated since 2.10 + // private List> filters = new ArrayList<>(); + + // FIXME verify if we need to support this, deprecated since 2.6 + // private Optional> geoFilter = Optional.empty(); + + private final List inKeys = new ArrayList<>(); + + private final List inFields = new ArrayList<>(); + + private final Map> returnFields = new HashMap<>(); + + private Optional> summarize = Optional.empty(); + + private Optional> highlight = Optional.empty(); + + private OptionalLong slop = OptionalLong.empty(); + + private Optional inOrder = Optional.empty(); + + private Optional language = Optional.empty(); + + private Optional expander = Optional.empty(); + + private Optional scorer = Optional.empty(); + + // FIXME verify if we want to support this + // private Optional explainScore = Optional.empty(); + + private Optional payload = Optional.empty(); + + private Optional> sortBy = Optional.empty(); + + private Optional limit = Optional.empty(); + + private Optional timeout = Optional.empty(); + + private final Map params = new HashMap<>(); + + private QueryDialects dialect = QueryDialects.DIALECT2; + + /** + * Used to build a new instance of the {@link SearchArgs}. + * + * @return a {@link SearchArgs.Builder} that provides the option to build up a new instance of the {@link SearchArgs} + * @param the key type + * @param the value type + */ + public static SearchArgs.Builder builder() { + return new SearchArgs.Builder<>(); + } + + /** + * Builder for {@link SearchArgs}. + *

+ * As a final step the {@link SearchArgs.Builder#build()} method needs to be executed to create the final {@link SearchArgs} + * instance. + * + * @param the key type + * @param the value type + * @see FT.CREATE + */ + public static class Builder { + + private final SearchArgs instance = new SearchArgs<>(); + + /** + * Build a new instance of the {@link SearchArgs}. + * + * @return a new instance of the {@link SearchArgs} + */ + public SearchArgs build() { + return instance; + } + + /** + * Returns the document ids and not the content. This is useful if RediSearch is only an index on an external document + * collection. Disabled by default. + * + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder noContent() { + instance.noContent = Optional.of(true); + return this; + } + + /** + * Do not try to use stemming for query expansion but searches the query terms verbatim. Disabled by default. + * + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder verbatim() { + instance.verbatim = Optional.of(true); + return this; + } + + /** + * Ignore any defined stop words in full text searches. Disabled by default. + * + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder noStopWords() { + instance.noStopWords = Optional.of(true); + return this; + } + + /** + * Return the relative internal score of each document. This can be used to merge results from multiple instances. + * Disabled by default. + * + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder withScores() { + instance.withScores = Optional.of(true); + return this; + } + + /** + * Retrieve optional document payloads. The payloads follow the document id and, if + * {@link SearchArgs.Builder#withScores} is set, the scores. Disabled by default. + * + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see FT.CREATE + */ + public SearchArgs.Builder withPayloads() { + instance.withPayloads = Optional.of(true); + return this; + } + + /** + * Return the value of the sorting key, right after the id and score and/or payload, if requested. This is usually not + * needed, and exists for distributed search coordination purposes. This option is relevant only if used in conjunction + * with {@link SearchArgs.Builder#sortBy(SortByArgs)}. Disabled by default. + * + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder withSortKeys() { + instance.withSortKeys = Optional.of(true); + return this; + } + + /** + * Limit the result to a given set of keys specified in the list. Non-existent keys are ignored, unless all the keys are + * non-existent. + * + * @param key the key to search in + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder inKey(K key) { + instance.inKeys.add(key); + return this; + } + + /** + * Filter the result to those appearing only in specific attributes of the document. + * + * @param field the field to search in + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder inField(K field) { + instance.inFields.add(field); + return this; + } + + /** + * Limit the attributes returned from the document. The field is either an attribute name (for hashes and JSON) or a + * JSON Path expression (for JSON). as is the name of the field used in the result as an alias. + * + * @param field the field to return + * @param as the alias to use for this field in the result + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder returnField(K field, K as) { + instance.returnFields.put(field, Optional.ofNullable(as)); + return this; + } + + /** + * Limit the attributes returned from the document. The field is either an attribute name (for hashes and JSON) or a + * JSON Path expression (for JSON). + * + * @param field the field to return + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder returnField(K field) { + instance.returnFields.put(field, Optional.empty()); + return this; + } + + /** + * Return only the sections of the attribute that contain the matched text. + * + * @param summarizeFilter the summarization filter + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder summarize(SummarizeArgs summarizeFilter) { + instance.summarize = Optional.ofNullable(summarizeFilter); + return this; + } + + /** + * Format occurrences of matched text. + * + * @param highlightFilter the highlighting filter + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder highlight(HighlightArgs highlightFilter) { + instance.highlight = Optional.ofNullable(highlightFilter); + return this; + } + + /** + * Allow for a number of intermediate terms allowed to appear between the terms of the query. Suppose you're searching + * for a phrase hello world, if some other terms appear in-between hello and + * world, a SLOP greater than 0 allows for these text attributes to match. By default, there is no SLOP + * constraint. + * + * @param slop the slop value how many intermediate terms are allowed + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder slop(long slop) { + instance.slop = OptionalLong.of(slop); + return this; + } + + /** + * Require the terms in the document to have the same order as the terms in the query, regardless of the offsets between + * them. Typically used in conjunction with {@link SearchArgs.Builder#slop(long)}. Disabled by default. + * + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder inOrder() { + instance.inOrder = Optional.of(true); + return this; + } + + /** + * Specify the language of the query. This is used to stem the query terms. The default is + * {@link DocumentLanguage#ENGLISH}. + *

+ * If this setting was specified as part of index creation, it doesn't need to be specified here. + * + * @param language the language of the query + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder language(DocumentLanguage language) { + instance.language = Optional.ofNullable(language); + return this; + } + + /** + * Use a custom query expander instead of the stemmer + * + * @param expander the query expander to use + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Extensions + */ + public SearchArgs.Builder expander(V expander) { + instance.expander = Optional.ofNullable(expander); + return this; + } + + /** + * Use a built-in or a user-provided scoring function + * + * @param scorer the {@link ScoringFunction} to use + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Extensions + * @see Scoring + */ + public SearchArgs.Builder scorer(ScoringFunction scorer) { + instance.scorer = Optional.ofNullable(scorer); + return this; + } + + // /** + // * Return a textual description of how the scores were calculated. Using this option requires + // * {@link Builder#withScores()}. + // * + // * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + // */ + // public SearchArgs.Builder explainScore() { + // instance.explainScore = Optional.of(true); + // return this; + // } + + /** + * Add an arbitrary, binary safe payload exposed to custom scoring functions. + * + * @param payload the payload to return + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Extensions + */ + public SearchArgs.Builder payload(V payload) { + instance.payload = Optional.ofNullable(payload); + return this; + } + + /** + * Order the results by the value of this attribute. This applies to both text and numeric attributes. Attributes needed + * for SORTBY should be declared as SORTABLE in the index, to be available with very low latency. + *

+ * Note that this adds memory overhead. + * + * @param sortBy the {@link SortByArgs} to use + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder sortBy(SortByArgs sortBy) { + instance.sortBy = Optional.ofNullable(sortBy); + return this; + } + + /** + * Limit the results to the offset and number of results given. Note that the offset is zero-indexed. The default is 0 + * 10, which returns 10 items starting from the first result. You can use LIMIT 0 0 to count the number of documents in + * the result set without actually returning them. + *

+ * LIMIT behavior: If you use the LIMIT option without sorting, the results returned are non-deterministic, which means + * that subsequent queries may return duplicated or missing values. Add SORTBY with a unique field, or use FT.AGGREGATE + * with the WITHCURSOR option to ensure deterministic result set paging. + * + * @param offset the offset to use + * @param number the limit to use + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder limit(long offset, long number) { + instance.limit = Optional.of(new Limit(offset, number)); + return this; + } + + /** + * Override the maximum time to wait for the query to complete. + * + * @param timeout the timeout to use (with millisecond resolution) + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder timeout(Duration timeout) { + instance.timeout = Optional.ofNullable(timeout); + return this; + } + + /** + * Add one or more value parameters. Each parameter has a name and a value. + *

+ * Requires {@link QueryDialects#DIALECT2} or higher. + * + * @param name the name of the parameter + * @param value the value of the parameter + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + */ + public SearchArgs.Builder param(K name, V value) { + instance.params.put(name, value); + return this; + } + + /** + * Set the query dialect. The default is {@link QueryDialects#DIALECT2}. + * + * @param dialect the dialect to use + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see QueryDialects + */ + public SearchArgs.Builder dialect(QueryDialects dialect) { + instance.dialect = dialect; + return this; + } + + } + + /** + * Gets whether the NOCONTENT option is enabled. + * + * @return true if NOCONTENT is enabled, false otherwise + */ + public boolean isNoContent() { + return noContent.orElse(false); + } + + /** + * Gets whether the WITHSCORES option is enabled. + * + * @return true if WITHSCORES is enabled, false otherwise + */ + public boolean isWithScores() { + return withScores.orElse(false); + } + + /** + * Gets whether the WITHPAYLOADS option is enabled. + * + * @return true if WITHPAYLOADS is enabled, false otherwise + */ + public boolean isWithPayloads() { + return withPayloads.orElse(false); + } + + /** + * Gets whether the WITHSORTKEYS option is enabled. + * + * @return true if WITHSORTKEYS is enabled, false otherwise + */ + public boolean isWithSortKeys() { + return withSortKeys.orElse(false); + } + + /** + * Build a {@link CommandArgs} object that contains all the arguments. + * + * @param args the {@link CommandArgs} object + */ + public void build(CommandArgs args) { + + noContent.ifPresent(v -> args.add(CommandKeyword.NOCONTENT)); + verbatim.ifPresent(v -> args.add(CommandKeyword.VERBATIM)); + noStopWords.ifPresent(v -> args.add(CommandKeyword.NOSTOPWORDS)); + withScores.ifPresent(v -> args.add(CommandKeyword.WITHSCORES)); + withPayloads.ifPresent(v -> args.add(CommandKeyword.WITHPAYLOADS)); + withSortKeys.ifPresent(v -> args.add(CommandKeyword.WITHSORTKEYS)); + + if (!inKeys.isEmpty()) { + args.add(CommandKeyword.INKEYS); + args.add(inKeys.size()); + args.addKeys(inKeys); + } + + if (!inFields.isEmpty()) { + args.add(CommandKeyword.INFIELDS); + args.add(inFields.size()); + args.addKeys(inFields); + } + + if (!returnFields.isEmpty()) { + args.add(CommandKeyword.RETURN); + args.add(returnFields.size()); + returnFields.forEach((field, as) -> { + args.addKey(field); + as.ifPresent(args::addKey); + }); + } + + summarize.ifPresent(summarizeArgs -> { + summarizeArgs.build(args); + }); + + highlight.ifPresent(highlightArgs -> { + highlightArgs.build(args); + }); + + slop.ifPresent(v -> { + args.add(CommandKeyword.SLOP); + args.add(v); + }); + + timeout.ifPresent(timeoutDuration -> { + args.add(CommandKeyword.TIMEOUT); + args.add(timeoutDuration.toMillis()); + }); + + inOrder.ifPresent(v -> args.add(CommandKeyword.INORDER)); + + language.ifPresent(documentLanguage -> { + args.add(CommandKeyword.LANGUAGE); + args.add(documentLanguage.toString()); + }); + + expander.ifPresent(v -> { + args.add(CommandKeyword.EXPANDER); + args.addValue(v); + }); + + scorer.ifPresent(scoringFunction -> { + args.add(CommandKeyword.SCORER); + args.add(scoringFunction.toString()); + }); + + // explainScore.ifPresent(v -> args.add(CommandKeyword.EXPLAINSCORE)); + + payload.ifPresent(v -> { + args.add(CommandKeyword.PAYLOAD); + args.addValue(v); + }); + + sortBy.ifPresent(sortByArgs -> { + sortByArgs.build(args); + }); + + limit.ifPresent(limitArgs -> { + args.add(CommandKeyword.LIMIT); + args.add(limitArgs.offset); + args.add(limitArgs.num); + }); + + if (!params.isEmpty()) { + args.add(CommandKeyword.PARAMS); + args.add(params.size() * 2L); + params.forEach((name, value) -> { + args.addKey(name); + args.addValue(value); + }); + } + + args.add(CommandKeyword.DIALECT); + args.add(dialect.toString()); + } + + static class Limit { + + private final long offset; + + private final long num; + + Limit(long offset, long num) { + this.offset = offset; + this.num = num; + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/SortByArgs.java b/src/main/java/io/lettuce/core/search/arguments/SortByArgs.java new file mode 100644 index 000000000..23605e1a3 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/SortByArgs.java @@ -0,0 +1,111 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; +import io.lettuce.core.protocol.CommandKeyword; + +/** + * Argument list builder for {@code SORTBY} clause. + * + * @param Key type. + * @see Sorting + * @since 6.8 + * @author Tihomir Mateev + */ +public class SortByArgs { + + private K attribute; + + private boolean isDescending; + + private boolean withCount; + + /** + * Used to build a new instance of the {@link SortByArgs}. + * + * @return a {@link SortByArgs.Builder} that provides the option to build up a new instance of the {@link SearchArgs} + * @param the key type + */ + public static SortByArgs.Builder builder() { + return new SortByArgs.Builder<>(); + } + + /** + * Builder for {@link SortByArgs}. + *

+ * As a final step the {@link SortByArgs.Builder#build()} method needs to be executed to create the final {@link SortByArgs} + * instance. + * + * @param the key type + * @see FT.CREATE + */ + public static class Builder { + + private final SortByArgs sortByArgs = new SortByArgs<>(); + + /** + * Add an attribute to sort by. + * + * @param attribute the attribute to sort by + * @return the instance of the current {@link SortByArgs.Builder} for the purpose of method chaining + */ + public SortByArgs.Builder attribute(K attribute) { + sortByArgs.attribute = attribute; + return this; + } + + /** + * Sort in descending order. Default is ascending. + * + * @return the instance of the current {@link SortByArgs.Builder} for the purpose of method chaining + */ + public SortByArgs.Builder descending() { + sortByArgs.isDescending = true; + return this; + } + + /** + * Include the accurate counts for the query results with sorting. Default is disabled. + * + * @return the instance of the current {@link SortByArgs.Builder} for the purpose of method chaining + */ + public SortByArgs.Builder withCount() { + sortByArgs.withCount = true; + return this; + } + + /** + * Build the {@link SortByArgs}. + * + * @return the {@link SortByArgs} + */ + public SortByArgs build() { + return sortByArgs; + } + + } + + /** + * Build a {@link CommandArgs} object that contains all the arguments. + * + * @param args the {@link CommandArgs} object + */ + public void build(CommandArgs args) { + args.add(CommandKeyword.SORTBY).addKey(attribute); + + if (this.isDescending) { + args.add(CommandKeyword.DESC); + } + + if (this.withCount) { + args.add(CommandKeyword.WITHCOUNT); + } + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/SummarizeArgs.java b/src/main/java/io/lettuce/core/search/arguments/SummarizeArgs.java new file mode 100644 index 000000000..09632f68d --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/SummarizeArgs.java @@ -0,0 +1,151 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; +import io.lettuce.core.protocol.CommandKeyword; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +/** + * Argument list builder for {@code SUMMARIZE} clause. + * + * @param Key type. + * @param Value type. + * @see Highlighing + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class SummarizeArgs { + + private final List fields = new ArrayList<>(); + + private Optional frags = Optional.empty(); + + private Optional len = Optional.empty(); + + private Optional separator = Optional.empty(); + + /** + * Used to build a new instance of the {@link SummarizeArgs}. + * + * @return a {@link SummarizeArgs.Builder} that provides the option to build up a new instance of the {@link SearchArgs} + * @param the key type + */ + public static SummarizeArgs.Builder builder() { + return new SummarizeArgs.Builder<>(); + } + + /** + * Builder for {@link SummarizeArgs}. + *

+ * As a final step the {@link SummarizeArgs.Builder#build()} method needs to be executed to create the final + * {@link SortByArgs} instance. + * + * @param the key type + * @see FT.CREATE + */ + public static class Builder { + + private final SummarizeArgs summarizeArgs = new SummarizeArgs<>(); + + /** + * Add a field to summarize. Each field is summarized. If no FIELDS directive is passed, then all returned fields are + * summarized. + * + * @param field the field to summarize + * @return the instance of the current {@link SummarizeArgs.Builder} for the purpose of method chaining + */ + public SummarizeArgs.Builder field(K field) { + summarizeArgs.fields.add(field); + return this; + } + + /** + * Set the number of fragments to be returned. If not specified, the default is 3. + * + * @param frags the number of fragments to return + * @return the instance of the current {@link SummarizeArgs.Builder} for the purpose of method chaining + */ + public SummarizeArgs.Builder fragments(long frags) { + summarizeArgs.frags = Optional.of(frags); + return this; + } + + /** + * Set the number of context words each fragment should contain. Context words surround the found term. A higher value + * will return a larger block of text. If not specified, the default value is 20. + * + * @param len the length of the fragments + * @return the instance of the current {@link SummarizeArgs.Builder} for the purpose of method chaining + */ + + public SummarizeArgs.Builder len(long len) { + summarizeArgs.len = Optional.of(len); + return this; + } + + /** + * The string used to divide individual summary snippets. The default is ... which is common among search + * engines, but you may override this with any other string if you desire to programmatically divide the snippets later + * on. You may also use a newline sequence, as newlines are stripped from the result body during processing. + * + * @param separator the separator between fragments + * @return the instance of the current {@link SummarizeArgs.Builder} for the purpose of method chaining + */ + public SummarizeArgs.Builder separator(V separator) { + summarizeArgs.separator = Optional.of(separator); + return this; + } + + /** + * Build the {@link SummarizeArgs}. + * + * @return the {@link SummarizeArgs} + */ + public SummarizeArgs build() { + return summarizeArgs; + } + + } + + /** + * Build a {@link CommandArgs} object that contains all the arguments. + * + * @param args the {@link CommandArgs} object + */ + public void build(CommandArgs args) { + args.add(CommandKeyword.SUMMARIZE); + + if (!fields.isEmpty()) { + args.add(CommandKeyword.FIELDS); + args.add(fields.size()); + args.addKeys(fields); + } + + frags.ifPresent(f -> { + args.add(CommandKeyword.FRAGS); + args.add(f); + }); + + len.ifPresent(l -> { + args.add(CommandKeyword.LEN); + args.add(l); + }); + + separator.ifPresent(s -> { + args.add(CommandKeyword.SEPARATOR); + args.addValue(s); + }); + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/TagFieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/TagFieldArgs.java new file mode 100644 index 000000000..abe73d2d2 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/TagFieldArgs.java @@ -0,0 +1,153 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +import java.util.Optional; + +import static io.lettuce.core.protocol.CommandKeyword.*; + +/** + * Field arguments for TAG fields in a RediSearch index. + *

+ * Tag fields are used to store textual data that represents a collection of data tags or labels. Tag fields are characterized + * by their low cardinality, meaning they typically have a limited number of distinct values. Unlike text fields, tag fields are + * stored as-is without tokenization or stemming. They are useful for organizing and categorizing data, making it easier to + * filter and retrieve documents based on specific tags. + * + * @param Key type + * @see Tag + * Fields + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class TagFieldArgs extends FieldArgs { + + private Optional separator = Optional.empty(); + + private boolean caseSensitive; + + private boolean withSuffixTrie; + + /** + * Create a new {@link TagFieldArgs} using the builder pattern. + * + * @param Key type + * @return a new {@link Builder} + */ + public static Builder builder() { + return new Builder<>(); + } + + @Override + public String getFieldType() { + return "TAG"; + } + + /** + * Get the separator for tag fields. + * + * @return the separator + */ + public Optional getSeparator() { + return separator; + } + + /** + * Check if the field is case sensitive. + * + * @return true if case sensitive + */ + public boolean isCaseSensitive() { + return caseSensitive; + } + + /** + * Check if suffix trie is enabled. + * + * @return true if suffix trie is enabled + */ + public boolean isWithSuffixTrie() { + return withSuffixTrie; + } + + @Override + protected void buildTypeSpecificArgs(CommandArgs args) { + separator.ifPresent(s -> args.add(SEPARATOR).add(s)); + if (caseSensitive) { + args.add(CASESENSITIVE); + } + if (withSuffixTrie) { + args.add(WITHSUFFIXTRIE); + } + } + + /** + * Builder for {@link TagFieldArgs}. + * + * @param Key type + */ + public static class Builder extends FieldArgs.Builder, Builder> { + + public Builder() { + super(new TagFieldArgs<>()); + } + + /** + * The separator for TAG attributes. The default separator is a comma. + * + * @param separator the separator for tag fields + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder separator(String separator) { + instance.separator = Optional.of(separator); + return self(); + } + + /** + * Keeps the original letter cases of the tags. If not specified, the characters are converted to lowercase. Works with + * TAG attributes. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder caseSensitive() { + instance.caseSensitive = true; + return self(); + } + + /** + * For TAG attributes, keeps a suffix trie with all terms which match the suffix. It is used to optimize contains + * (*foo*) and suffix (*foo) queries. Otherwise, a brute-force search on the trie is performed. If the suffix trie + * exists for some fields, these queries will be disabled for other fields. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder withSuffixTrie() { + instance.withSuffixTrie = true; + return self(); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/TextFieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/TextFieldArgs.java new file mode 100644 index 000000000..1f6f954f6 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/TextFieldArgs.java @@ -0,0 +1,209 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +import java.util.Optional; + +import static io.lettuce.core.protocol.CommandKeyword.*; + +/** + * Field arguments for TEXT fields in a RediSearch index. + *

+ * Text fields are specifically designed for storing human language text. When indexing text fields, Redis performs several + * transformations to optimize search capabilities. The text is transformed to lowercase, allowing case-insensitive searches. + * The data is tokenized, meaning it is split into individual words or tokens, which enables efficient full-text search + * functionality. + * + * @param Key type + * @see Text + * Fields + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class TextFieldArgs extends FieldArgs { + + /** + * Phonetic matchers for text fields. + */ + public enum PhoneticMatcher { + + ENGLISH("dm:en"), FRENCH("dm:fr"), PORTUGUESE("dm:pt"), SPANISH("dm:es"); + + private final String matcher; + + PhoneticMatcher(String matcher) { + this.matcher = matcher; + } + + public String getMatcher() { + return matcher; + } + + } + + private Optional weight = Optional.empty(); + + private boolean noStem; + + private Optional phonetic = Optional.empty(); + + private boolean withSuffixTrie; + + /** + * Create a new {@link TextFieldArgs} using the builder pattern. + * + * @param Key type + * @return a new {@link Builder} + */ + public static Builder builder() { + return new Builder<>(); + } + + @Override + public String getFieldType() { + return "TEXT"; + } + + /** + * Get the weight of the field. + * + * @return the weight + */ + public Optional getWeight() { + return weight; + } + + /** + * Check if stemming is disabled. + * + * @return true if stemming is disabled + */ + public boolean isNoStem() { + return noStem; + } + + /** + * Get the phonetic matcher. + * + * @return the phonetic matcher + */ + public Optional getPhonetic() { + return phonetic; + } + + /** + * Check if suffix trie is enabled. + * + * @return true if suffix trie is enabled + */ + public boolean isWithSuffixTrie() { + return withSuffixTrie; + } + + @Override + protected void buildTypeSpecificArgs(CommandArgs args) { + weight.ifPresent(w -> args.add(WEIGHT).add(w)); + if (noStem) { + args.add(NOSTEM); + } + phonetic.ifPresent(p -> args.add(PHONETIC).add(p.getMatcher())); + if (withSuffixTrie) { + args.add(WITHSUFFIXTRIE); + } + } + + /** + * Builder for {@link TextFieldArgs}. + * + * @param Key type + */ + public static class Builder extends FieldArgs.Builder, Builder> { + + public Builder() { + super(new TextFieldArgs<>()); + } + + /** + * The weight of the field. Works with TEXT attributes, declares the importance of this attribute when calculating + * result accuracy. This is a multiplication factor. The default weight is 1. + * + * @param weight the weight of the field + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder weight(long weight) { + instance.weight = Optional.of(weight); + return self(); + } + + /** + * By default, the index applies stemming to TEXT fields. If you don't want to apply stemming to the field, you can use + * the NOSTEM argument. This may be ideal for things like proper names. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder noStem() { + instance.noStem = true; + return self(); + } + + /** + * Phonetic matching is a feature that allows you to search for similar-sounding words. For example, a search for + * "Smith" will also return results for "Smyth". Phonetic matching is language-specific, and you can specify the + * language using the PHONETIC argument. + *

+ * The following languages are supported: + *

    + *
  • ENGLISH
  • + *
  • FRENCH
  • + *
  • PORTUGUESE
  • + *
  • SPANISH
  • + *
+ * + * @see Phonetic + * Matching + * @param matcher the phonetic matcher + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder phonetic(PhoneticMatcher matcher) { + instance.phonetic = Optional.of(matcher); + return self(); + } + + /** + * For TEXT attributes, keeps a suffix trie with all terms which match the suffix. It is used to optimize contains + * (*foo*) and suffix (*foo) queries. Otherwise, a brute-force search on the trie is performed. If the suffix trie + * exists for some fields, these queries will be disabled for other fields. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder withSuffixTrie() { + instance.withSuffixTrie = true; + return self(); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/arguments/VectorFieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/VectorFieldArgs.java new file mode 100644 index 000000000..1500f26dc --- /dev/null +++ b/src/main/java/io/lettuce/core/search/arguments/VectorFieldArgs.java @@ -0,0 +1,231 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search.arguments; + +import io.lettuce.core.protocol.CommandArgs; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +import static io.lettuce.core.protocol.CommandKeyword.*; + +/** + * Field arguments for VECTOR fields in a RediSearch index. + *

+ * Vector fields are floating-point vectors that are typically generated by external machine learning models. These vectors + * represent unstructured data such as text, images, or other complex features. Redis allows you to search for similar vectors + * using vector search algorithms like cosine similarity, Euclidean distance, and inner product. + * + * @param Key type + * @see Vector + * Fields + * @since 6.8 + * @author Tihomir Mateev + */ +@SuppressWarnings("OptionalUsedAsFieldOrParameterType") +public class VectorFieldArgs extends FieldArgs { + + /** + * Vector similarity index algorithms. + */ + public enum Algorithm { + /** + * Brute force algorithm. + */ + FLAT, + /** + * Hierarchical, navigable, small world algorithm. + */ + HNSW + } + + /** + * Vector data types. + */ + public enum VectorType { + /** + * 32-bit floating point. + */ + FLOAT32, + /** + * 64-bit floating point. + */ + FLOAT64 + } + + /** + * Distance metrics for vector similarity. + */ + public enum DistanceMetric { + /** + * Euclidean distance (L2 norm). + */ + L2, + /** + * Cosine similarity. + */ + COSINE, + /** + * Inner product. + */ + IP + } + + private Optional algorithm = Optional.empty(); + + private final Map attributes = new HashMap<>(); + + /** + * Create a new {@link VectorFieldArgs} using the builder pattern. + * + * @param Key type + * @return a new {@link Builder} + */ + public static Builder builder() { + return new Builder<>(); + } + + @Override + public String getFieldType() { + return "VECTOR"; + } + + /** + * Get the vector algorithm. + * + * @return the algorithm + */ + public Optional getAlgorithm() { + return algorithm; + } + + /** + * Get the vector attributes. + * + * @return the attributes + */ + public Map getAttributes() { + return new HashMap<>(attributes); + } + + @Override + protected void buildTypeSpecificArgs(CommandArgs args) { + algorithm.ifPresent(alg -> args.add(alg.toString())); + + if (!attributes.isEmpty()) { + args.add(String.valueOf(attributes.size() * 2)); // count of attribute pairs + attributes.forEach((key, value) -> { + args.add(key); + args.add(value.toString()); + }); + } + } + + /** + * Builder for {@link VectorFieldArgs}. + * + * @param Key type + */ + public static class Builder extends FieldArgs.Builder, Builder> { + + public Builder() { + super(new VectorFieldArgs<>()); + } + + /** + * Set the vector similarity index algorithm. + * + * @param algorithm the algorithm + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder algorithm(Algorithm algorithm) { + instance.algorithm = Optional.of(algorithm); + return self(); + } + + /** + * Use the FLAT (brute force) algorithm. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder flat() { + return algorithm(Algorithm.FLAT); + } + + /** + * Use the HNSW (hierarchical, navigable, small world) algorithm. + * + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder hnsw() { + return algorithm(Algorithm.HNSW); + } + + /** + * Set the vector data type. + * + * @param type the vector data type + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder type(VectorType type) { + instance.attributes.put(TYPE.toString(), type.toString()); + return self(); + } + + /** + * Set the vector dimensionality. + * + * @param dimensions the number of dimensions + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder dimensions(int dimensions) { + instance.attributes.put(DIM.toString(), dimensions); + return self(); + } + + /** + * Set the distance metric. + * + * @param metric the distance metric + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder distanceMetric(DistanceMetric metric) { + instance.attributes.put(DISTANCE_METRIC.toString(), metric.toString()); + return self(); + } + + /** + * Add a custom attribute. + * + * @param name the attribute name + * @param value the attribute value + * @return the instance of the {@link Builder} for the purpose of method chaining + */ + public Builder attribute(String name, Object value) { + instance.attributes.put(name, value); + return self(); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/package-info.java b/src/main/java/io/lettuce/core/search/package-info.java new file mode 100644 index 000000000..0d4f7f5cd --- /dev/null +++ b/src/main/java/io/lettuce/core/search/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +/** + * Support for the RediSearch features. + */ +package io.lettuce.core.search; diff --git a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt new file mode 100644 index 000000000..9aeb95c66 --- /dev/null +++ b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt @@ -0,0 +1,445 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.api.coroutines + +import io.lettuce.core.ExperimentalLettuceCoroutinesApi +import kotlinx.coroutines.flow.Flow +import io.lettuce.core.annotations.Experimental +import io.lettuce.core.search.SearchReply +import io.lettuce.core.search.arguments.AggregateArgs +import io.lettuce.core.search.arguments.CreateArgs +import io.lettuce.core.search.arguments.FieldArgs +import io.lettuce.core.search.arguments.SearchArgs + +/** + * Coroutine executed commands for RediSearch functionality + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @see RediSearch + * @since 6.8 + * @generated by io.lettuce.apigenerator.CreateKotlinCoroutinesApi + */ +@ExperimentalLettuceCoroutinesApi +interface RediSearchCoroutinesCommands { + + /** + * Create a new search index with the given name and field definitions using default settings. + * + *

+ * This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis + * data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other + * configuration options. + *

+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param fieldArgs the [FieldArgs] list defining the searchable fields and their types + * @return @code "OK"} if the index was created successfully + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Any, CreateArgs, List) + * @see #ftDropindex(Any) + */ + @Experimental + suspend fun ftCreate(index: K, fieldArgs: List>): String? + + /** + * Create a new search index with the given name, custom configuration, and field definitions. + * + *

+ * This command creates a new search index with advanced configuration options that control how the index behaves, what data + * it indexes, and how it processes documents. This variant provides full control over index creation parameters. + *

+ * + *

+ * The [CreateArgs] parameter allows you to specify: + *

+ *
    + *
  • Data type: HASH (default) or JSON documents
  • + *
  • Key prefixes: Which keys to index based on prefix patterns
  • + *
  • Filters: Conditional indexing based on field values
  • + *
  • Language settings: Default language and language field for stemming
  • + *
  • Performance options: NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization
  • + *
  • Temporary indexes: Auto-expiring indexes for short-term use
  • + *
+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param arguments the index [CreateArgs] containing configuration options + * @param fieldArgs the [FieldArgs] list defining the searchable fields and their types + * @return @code "OK"} if the index was created successfully + * @since 6.8 + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Any, List) + * @see #ftDropindex(Any) + */ + @Experimental + suspend fun ftCreate(index: K, arguments: CreateArgs, fieldArgs: List>): String? + + /** + * Drop a search index without deleting the associated documents. + * + *

+ * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or + * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without + * losing data. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @return @code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Any, boolean) + * @see #ftCreate(Any, List) + */ + @Experimental + suspend fun ftDropindex(index: K): String? + + /** + * Drop a search index with optional document deletion. + * + *

+ * This command removes the search index and optionally deletes all associated documents. When `deleteDocuments` is + * `true`, this operation becomes destructive and will permanently remove both the index and all indexed documents + * from Redis. + *

+ * + *

+ * Asynchronous Behavior: If an index creation is still running ([ftCreate(Any, List)] is running + * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for + * indexing but not yet processed will remain in the database. + *

+ * + *

+ * Time complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param deleteDocuments if `true`, delete the indexed documents as well; if `false`, preserve documents + * @return @code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Any) + * @see #ftCreate(Any, List) + */ + @Experimental + suspend fun ftDropindex(index: K, deleteDocuments: Boolean): String? + + /** + * Search the index with a textual query using default search options. + * + *

+ * This command performs a full-text search on the specified index using the provided query string. It returns matching + * documents with their content and metadata. This is the basic search variant that uses default search behavior without + * additional filtering, sorting, or result customization. + *

+ * + *

+ * The query follows RediSearch query syntax, supporting: + *

+ *
    + *
  • Simple text search: {@code "hello world"} - searches for documents containing both terms
  • + *
  • Field-specific search: {@code "@title:redis"} - searches within specific fields
  • + *
  • Boolean operators: {@code "redis AND search"} or {@code "redis | search"}
  • + *
  • Phrase search: {@code "\"exact phrase\""} - searches for exact phrase matches
  • + *
  • Wildcard search: {@code "redi*"} - prefix matching
  • + *
  • Numeric ranges: {@code "@price:[100 200]"} - numeric field filtering
  • + *
  • Geographic search: {@code "@location:[lon lat radius unit]"} - geo-spatial queries
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @return the result of the search command containing matching documents, see [SearchReply] + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Any, Any, SearchArgs) + */ + @Experimental + suspend fun ftSearch(index: K, query: V): SearchReply? + + /** + * Search the index with a textual query using advanced search options and filters. + * + *

+ * This command performs a full-text search on the specified index with advanced configuration options provided through + * [SearchArgs]. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting, + * and pagination. + *

+ * + *

+ * The [SearchArgs] parameter enables you to specify: + *

+ *
    + *
  • Result options: NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS
  • + *
  • Query behavior: VERBATIM (no stemming), NOSTOPWORDS
  • + *
  • Filtering: Numeric filters, geo filters, field filters
  • + *
  • Result customization: RETURN specific fields, SUMMARIZE, HIGHLIGHT
  • + *
  • Sorting and pagination: SORTBY, LIMIT offset and count
  • + *
  • Performance options: TIMEOUT, SLOP, INORDER
  • + *
  • Language and scoring: LANGUAGE, SCORER, EXPLAINSCORE
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use NOCONTENT when you only need document IDs
  • + *
  • Specify RETURN fields to limit data transfer
  • + *
  • Use SORTABLE fields for efficient sorting
  • + *
  • Apply filters to reduce result set size
  • + *
  • Use LIMIT for pagination to avoid large result sets
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set. Complexity varies based on + * query type, filters, and sorting requirements. + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @param args the search arguments containing advanced options and filters + * @return the result of the search command containing matching documents and metadata, see [SearchReply] + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see Advanced concepts + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Any, Any) + */ + @Experimental + suspend fun ftSearch(index: K, query: V, args: SearchArgs): SearchReply? + + /** + * Run a search query on an index and perform basic aggregate transformations using default options. + * + *

+ * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike + * [ftSearch(Any, Any)], which returns individual documents, FT.AGGREGATE processes the result set through a + * pipeline of transformations to produce analytical insights, summaries, and computed values. + *

+ * + *

+ * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations + * with grouping, sorting, filtering, and custom transformations, use [ftAggregate(Any, Any, AggregateArgs)]. + *

+ * + *

+ * Common use cases for aggregations include: + *

+ *
    + *
  • Analytics: Count documents, calculate averages, find min/max values
  • + *
  • Reporting: Group data by categories, time periods, or geographic regions
  • + *
  • Data transformation: Apply mathematical functions, format dates, extract values
  • + *
  • Performance optimization: Process large datasets server-side instead of client-side
  • + *
+ * + *

+ * Time complexity: O(1) base complexity, but depends on the query and number of results processed + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @return the result of the aggregate command containing processed results, see [SearchReply] + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Any, Any, AggregateArgs) + */ + @Experimental + suspend fun ftAggregate(index: K, query: V): SearchReply? + + /** + * Run a search query on an index and perform advanced aggregate transformations with a processing pipeline. + * + *

+ * This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and + * analyze the results. The [AggregateArgs] parameter defines a series of operations that process the data + * server-side, enabling powerful analytics and data transformation capabilities directly within Redis. + *

+ * + *

+ * The aggregation pipeline supports the following operations: + *

+ *
    + *
  • LOAD: Load specific document attributes for processing
  • + *
  • GROUPBY: Group results by one or more properties
  • + *
  • REDUCE: Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)
  • + *
  • SORTBY: Sort results by specified properties
  • + *
  • APPLY: Apply mathematical expressions and transformations
  • + *
  • FILTER: Filter results based on computed values
  • + *
  • LIMIT: Paginate results efficiently
  • + *
  • WITHCURSOR: Enable cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use SORTABLE fields for efficient grouping and sorting operations
  • + *
  • Apply filters early in the pipeline to reduce processing overhead
  • + *
  • Use WITHCURSOR for large result sets to avoid memory issues
  • + *
  • Load only necessary attributes to minimize data transfer
  • + *
  • Consider using LIMIT to restrict result set size
  • + *
+ * + *

+ * Time complexity: Non-deterministic, depends on the query and aggregation operations performed. Generally + * linear to the number of results processed through the pipeline. + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @param args the aggregate arguments defining the processing pipeline and operations + * @return the result of the aggregate command containing processed and transformed results, see [SearchReply] + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see Cursor + * API + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Any, Any) + * @see #ftCursorread(Any, long) + */ + @Experimental + suspend fun ftAggregate(index: K, query: V, args: AggregateArgs): SearchReply? + + /** + * Read next results from an existing cursor. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * [ftAggregate(Any, Any, AggregateArgs)] with the `WITHCURSOR` option. Cursors provide an efficient way + * to iterate through large result sets without loading all results into memory at once. + *

+ * + *

+ * The `count` parameter overrides the `COUNT` value specified in the original `FT.AGGREGATE` command, + * allowing you to control the batch size for this specific read operation. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous `FT.AGGREGATE` or `FT.CURSOR READ` command + * @param count the number of results to read. This parameter overrides the `COUNT` specified in `FT.AGGREGATE` + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * [SearchReply] + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Any, Any, AggregateArgs) + */ + @Experimental + suspend fun ftCursorread(index: K, cursorId: Long, count: Int): SearchReply? + + /** + * Read next results from an existing cursor using the default batch size. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * [ftAggregate(Any, Any, AggregateArgs)] with the `WITHCURSOR` option. This variant uses the default + * batch size that was specified in the original `FT.AGGREGATE` command's `WITHCURSOR` clause. + *

+ * + *

+ * Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once. + * When the cursor is exhausted (no more results), the returned [SearchReply] will have a cursor id of 0. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous `FT.AGGREGATE` or `FT.CURSOR READ` command + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * [SearchReply] + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Any, Any, AggregateArgs) + */ + @Experimental + suspend fun ftCursorread(index: K, cursorId: Long): SearchReply? + + /** + * Delete a cursor and free its associated resources. + * + *

+ * This command is used to explicitly delete a cursor created by [ftAggregate(Any, Any, AggregateArgs)] with + * the `WITHCURSOR` option. Deleting a cursor frees up server resources and should be done when you no longer need to + * read more results from the cursor. + *

+ * + *

+ * Important: Cursors have a default timeout and will be automatically deleted by Redis if not accessed + * within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to + * free up resources immediately. + *

+ * + *

+ * Once a cursor is deleted, any subsequent attempts to read from it using [ftCursorread(Any, long)] or + * [ftCursorread(Any, long, Integer)] will result in an error. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous `FT.AGGREGATE` or `FT.CURSOR READ` command + * @return @code "OK"} if the cursor was successfully deleted + * @since 6.8 + * @see FT.CURSOR DEL + * @see Cursor + * API + * @see #ftAggregate(Any, Any, AggregateArgs) + * @see #ftCursorread(Any, long) + * @see #ftCursorread(Any, long, Integer) + */ + @Experimental + suspend fun ftCursordel(index: K, cursorId: Long): String? + +} + diff --git a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommandsImpl.kt b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommandsImpl.kt new file mode 100644 index 000000000..911cb2b8f --- /dev/null +++ b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommandsImpl.kt @@ -0,0 +1,68 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.api.coroutines + +import io.lettuce.core.ExperimentalLettuceCoroutinesApi +import io.lettuce.core.api.reactive.RediSearchReactiveCommands +import io.lettuce.core.search.SearchReply +import io.lettuce.core.search.arguments.AggregateArgs +import io.lettuce.core.search.arguments.CreateArgs +import io.lettuce.core.search.arguments.FieldArgs +import io.lettuce.core.search.arguments.SearchArgs +import kotlinx.coroutines.reactive.awaitFirstOrNull + +/** + * Coroutine executed commands (based on reactive commands) for RediSearch. + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @since 6.8 + */ +@ExperimentalLettuceCoroutinesApi +open class RediSearchCoroutinesCommandsImpl(internal val ops: RediSearchReactiveCommands) : + RediSearchCoroutinesCommands { + + override suspend fun ftCreate(index: K, arguments: CreateArgs, fieldArgs: List>): String? = + ops.ftCreate(index, arguments, fieldArgs).awaitFirstOrNull() + + override suspend fun ftCreate(index: K, fieldArgs: List>): String? = + ops.ftCreate(index, fieldArgs).awaitFirstOrNull() + + override suspend fun ftDropindex(index: K, deleteDocuments: Boolean): String? = + ops.ftDropindex(index, deleteDocuments).awaitFirstOrNull() + + override suspend fun ftDropindex(index: K): String? = + ops.ftDropindex(index).awaitFirstOrNull() + + override suspend fun ftSearch(index: K, query: V): SearchReply? = + ops.ftSearch(index, query).awaitFirstOrNull() + + override suspend fun ftSearch(index: K, query: V, args: SearchArgs): SearchReply? = + ops.ftSearch(index, query, args).awaitFirstOrNull() + + override suspend fun ftAggregate(index: K, query: V, args: AggregateArgs): SearchReply? { + return ops.ftAggregate(index, query, args).awaitFirstOrNull() + } + + override suspend fun ftAggregate(index: K, query: V): SearchReply? { + return ops.ftAggregate(index, query).awaitFirstOrNull() + } + + override suspend fun ftCursorread(index: K, cursorId: Long): SearchReply? { + return ops.ftCursorread(index, cursorId).awaitFirstOrNull() + } + + override suspend fun ftCursorread(index: K, cursorId: Long, count: Int): SearchReply? { + return ops.ftCursorread(index, cursorId, count).awaitFirstOrNull() + } + + override suspend fun ftCursordel(index: K, cursorId: Long): String? { + return ops.ftCursordel(index, cursorId).awaitFirstOrNull() + } +} diff --git a/src/main/kotlin/io/lettuce/core/api/coroutines/RedisCoroutinesCommandsImpl.kt b/src/main/kotlin/io/lettuce/core/api/coroutines/RedisCoroutinesCommandsImpl.kt index e1a803cf2..5da74d93c 100644 --- a/src/main/kotlin/io/lettuce/core/api/coroutines/RedisCoroutinesCommandsImpl.kt +++ b/src/main/kotlin/io/lettuce/core/api/coroutines/RedisCoroutinesCommandsImpl.kt @@ -54,7 +54,8 @@ open class RedisCoroutinesCommandsImpl( RedisStringCoroutinesCommands by RedisStringCoroutinesCommandsImpl(ops), RedisTransactionalCoroutinesCommands by RedisTransactionalCoroutinesCommandsImpl(ops), RedisJsonCoroutinesCommands by RedisJsonCoroutinesCommandsImpl(ops), - RedisVectorSetCoroutinesCommands by RedisVectorSetCoroutinesCommandsImpl(ops) { + RedisVectorSetCoroutinesCommands by RedisVectorSetCoroutinesCommandsImpl(ops), + RediSearchCoroutinesCommands by RediSearchCoroutinesCommandsImpl(ops){ /** diff --git a/src/main/kotlin/io/lettuce/core/api/coroutines/RedisVectorSetCoroutinesCommandsImpl.kt b/src/main/kotlin/io/lettuce/core/api/coroutines/RedisVectorSetCoroutinesCommandsImpl.kt index 883edabb4..0ddfde0ef 100644 --- a/src/main/kotlin/io/lettuce/core/api/coroutines/RedisVectorSetCoroutinesCommandsImpl.kt +++ b/src/main/kotlin/io/lettuce/core/api/coroutines/RedisVectorSetCoroutinesCommandsImpl.kt @@ -27,7 +27,6 @@ import kotlinx.coroutines.reactive.asFlow * @since 6.7 */ @ExperimentalLettuceCoroutinesApi - internal class RedisVectorSetCoroutinesCommandsImpl(internal val ops: RedisVectorSetReactiveCommands) : RedisVectorSetCoroutinesCommands { diff --git a/src/main/templates/io/lettuce/core/api/RediSearchCommands.java b/src/main/templates/io/lettuce/core/api/RediSearchCommands.java new file mode 100644 index 000000000..e096f648e --- /dev/null +++ b/src/main/templates/io/lettuce/core/api/RediSearchCommands.java @@ -0,0 +1,441 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.api; + +import io.lettuce.core.annotations.Experimental; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; + +import java.util.List; + +/** + * ${intent} for RediSearch functionality + * + * @param Key type. + * @param Value type. + * @author Tihomir Mateev + * @see RediSearch + * @since 6.8 + */ +public interface RediSearchCommands { + + /** + * Create a new search index with the given name and field definitions using default settings. + * + *

+ * This command creates a new search index that enables full-text search, filtering, and aggregation capabilities on Redis + * data structures. The index will use default settings for data type (HASH), key prefixes (all keys), and other + * configuration options. + *

+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, CreateArgs, List) + * @see #ftDropindex(Object) + */ + @Experimental + String ftCreate(K index, List> fieldArgs); + + /** + * Create a new search index with the given name, custom configuration, and field definitions. + * + *

+ * This command creates a new search index with advanced configuration options that control how the index behaves, what data + * it indexes, and how it processes documents. This variant provides full control over index creation parameters. + *

+ * + *

+ * The {@link CreateArgs} parameter allows you to specify: + *

+ *
    + *
  • Data type: HASH (default) or JSON documents
  • + *
  • Key prefixes: Which keys to index based on prefix patterns
  • + *
  • Filters: Conditional indexing based on field values
  • + *
  • Language settings: Default language and language field for stemming
  • + *
  • Performance options: NOOFFSETS, NOHL, NOFIELDS, NOFREQS for memory optimization
  • + *
  • Temporary indexes: Auto-expiring indexes for short-term use
  • + *
+ * + *

+ * Time complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is + * triggered, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param arguments the index {@link CreateArgs} containing configuration options + * @param fieldArgs the {@link FieldArgs} list defining the searchable fields and their types + * @return {@code "OK"} if the index was created successfully + * @since 6.8 + * @see FT.CREATE + * @see CreateArgs + * @see FieldArgs + * @see #ftCreate(Object, List) + * @see #ftDropindex(Object) + */ + @Experimental + String ftCreate(K index, CreateArgs arguments, List> fieldArgs); + + /** + * Drop a search index without deleting the associated documents. + * + *

+ * This command removes the search index and all its associated metadata, but preserves the original documents (hashes or + * JSON objects) that were indexed. This is the safe default behavior that allows you to recreate the index later without + * losing data. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object, boolean) + * @see #ftCreate(Object, List) + */ + @Experimental + String ftDropindex(K index); + + /** + * Drop a search index with optional document deletion. + * + *

+ * This command removes the search index and optionally deletes all associated documents. When {@code deleteDocuments} is + * {@code true}, this operation becomes destructive and will permanently remove both the index and all indexed documents + * from Redis. + *

+ * + *

+ * Asynchronous Behavior: If an index creation is still running ({@link #ftCreate(Object, List)} is running + * asynchronously), only the document hashes that have already been indexed are deleted. Documents that are queued for + * indexing but not yet processed will remain in the database. + *

+ * + *

+ * Time complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace + *

+ * + * @param index the index name, as a key + * @param deleteDocuments if {@code true}, delete the indexed documents as well; if {@code false}, preserve documents + * @return {@code "OK"} if the index was successfully dropped + * @since 6.8 + * @see FT.DROPINDEX + * @see #ftDropindex(Object) + * @see #ftCreate(Object, List) + */ + @Experimental + String ftDropindex(K index, boolean deleteDocuments); + + /** + * Search the index with a textual query using default search options. + * + *

+ * This command performs a full-text search on the specified index using the provided query string. It returns matching + * documents with their content and metadata. This is the basic search variant that uses default search behavior without + * additional filtering, sorting, or result customization. + *

+ * + *

+ * The query follows RediSearch query syntax, supporting: + *

+ *
    + *
  • Simple text search: {@code "hello world"} - searches for documents containing both terms
  • + *
  • Field-specific search: {@code "@title:redis"} - searches within specific fields
  • + *
  • Boolean operators: {@code "redis AND search"} or {@code "redis | search"}
  • + *
  • Phrase search: {@code "\"exact phrase\""} - searches for exact phrase matches
  • + *
  • Wildcard search: {@code "redi*"} - prefix matching
  • + *
  • Numeric ranges: {@code "@price:[100 200]"} - numeric field filtering
  • + *
  • Geographic search: {@code "@location:[lon lat radius unit]"} - geo-spatial queries
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @return the result of the search command containing matching documents, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object, SearchArgs) + */ + @Experimental + SearchReply ftSearch(K index, V query); + + /** + * Search the index with a textual query using advanced search options and filters. + * + *

+ * This command performs a full-text search on the specified index with advanced configuration options provided through + * {@link SearchArgs}. This variant allows fine-grained control over search behavior, result formatting, filtering, sorting, + * and pagination. + *

+ * + *

+ * The {@link SearchArgs} parameter enables you to specify: + *

+ *
    + *
  • Result options: NOCONTENT, WITHSCORES, WITHPAYLOADS, WITHSORTKEYS
  • + *
  • Query behavior: VERBATIM (no stemming), NOSTOPWORDS
  • + *
  • Filtering: Numeric filters, geo filters, field filters
  • + *
  • Result customization: RETURN specific fields, SUMMARIZE, HIGHLIGHT
  • + *
  • Sorting and pagination: SORTBY, LIMIT offset and count
  • + *
  • Performance options: TIMEOUT, SLOP, INORDER
  • + *
  • Language and scoring: LANGUAGE, SCORER, EXPLAINSCORE
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use NOCONTENT when you only need document IDs
  • + *
  • Specify RETURN fields to limit data transfer
  • + *
  • Use SORTABLE fields for efficient sorting
  • + *
  • Apply filters to reduce result set size
  • + *
  • Use LIMIT for pagination to avoid large result sets
  • + *
+ * + *

+ * Time complexity: O(N) where N is the number of results in the result set. Complexity varies based on + * query type, filters, and sorting requirements. + *

+ * + * @param index the index name, as a key + * @param query the query string following RediSearch query syntax + * @param args the search arguments containing advanced options and filters + * @return the result of the search command containing matching documents and metadata, see {@link SearchReply} + * @since 6.8 + * @see FT.SEARCH + * @see Query syntax + * @see Advanced concepts + * @see SearchReply + * @see SearchArgs + * @see #ftSearch(Object, Object) + */ + @Experimental + SearchReply ftSearch(K index, V query, SearchArgs args); + + /** + * Run a search query on an index and perform basic aggregate transformations using default options. + * + *

+ * This command executes a search query and applies aggregation operations to transform and analyze the results. Unlike + * {@link #ftSearch(Object, Object)}, which returns individual documents, FT.AGGREGATE processes the result set through a + * pipeline of transformations to produce analytical insights, summaries, and computed values. + *

+ * + *

+ * This basic variant uses default aggregation behavior without additional pipeline operations. For advanced aggregations + * with grouping, sorting, filtering, and custom transformations, use {@link #ftAggregate(Object, Object, AggregateArgs)}. + *

+ * + *

+ * Common use cases for aggregations include: + *

+ *
    + *
  • Analytics: Count documents, calculate averages, find min/max values
  • + *
  • Reporting: Group data by categories, time periods, or geographic regions
  • + *
  • Data transformation: Apply mathematical functions, format dates, extract values
  • + *
  • Performance optimization: Process large datasets server-side instead of client-side
  • + *
+ * + *

+ * Time complexity: O(1) base complexity, but depends on the query and number of results processed + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @return the result of the aggregate command containing processed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + SearchReply ftAggregate(K index, V query); + + /** + * Run a search query on an index and perform advanced aggregate transformations with a processing pipeline. + * + *

+ * This command executes a search query and applies a sophisticated aggregation pipeline to transform, group, sort, and + * analyze the results. The {@link AggregateArgs} parameter defines a series of operations that process the data + * server-side, enabling powerful analytics and data transformation capabilities directly within Redis. + *

+ * + *

+ * The aggregation pipeline supports the following operations: + *

+ *
    + *
  • LOAD: Load specific document attributes for processing
  • + *
  • GROUPBY: Group results by one or more properties
  • + *
  • REDUCE: Apply reduction functions (COUNT, SUM, AVG, MIN, MAX, etc.)
  • + *
  • SORTBY: Sort results by specified properties
  • + *
  • APPLY: Apply mathematical expressions and transformations
  • + *
  • FILTER: Filter results based on computed values
  • + *
  • LIMIT: Paginate results efficiently
  • + *
  • WITHCURSOR: Enable cursor-based pagination for large result sets
  • + *
+ * + *

Performance Considerations:

+ *
    + *
  • Use SORTABLE fields for efficient grouping and sorting operations
  • + *
  • Apply filters early in the pipeline to reduce processing overhead
  • + *
  • Use WITHCURSOR for large result sets to avoid memory issues
  • + *
  • Load only necessary attributes to minimize data transfer
  • + *
  • Consider using LIMIT to restrict result set size
  • + *
+ * + *

+ * Time complexity: Non-deterministic, depends on the query and aggregation operations performed. Generally + * linear to the number of results processed through the pipeline. + *

+ * + * @param index the index name, as a key + * @param query the base filtering query that retrieves documents for aggregation + * @param args the aggregate arguments defining the processing pipeline and operations + * @return the result of the aggregate command containing processed and transformed results, see {@link SearchReply} + * @since 6.8 + * @see FT.AGGREGATE + * @see Aggregations + * @see Cursor + * API + * @see SearchReply + * @see AggregateArgs + * @see #ftAggregate(Object, Object) + * @see #ftCursorread(Object, long) + */ + @Experimental + SearchReply ftAggregate(K index, V query, AggregateArgs args); + + /** + * Read next results from an existing cursor. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. Cursors provide an efficient way + * to iterate through large result sets without loading all results into memory at once. + *

+ * + *

+ * The {@code count} parameter overrides the {@code COUNT} value specified in the original {@code FT.AGGREGATE} command, + * allowing you to control the batch size for this specific read operation. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @param count the number of results to read. This parameter overrides the {@code COUNT} specified in {@code FT.AGGREGATE} + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + SearchReply ftCursorread(K index, long cursorId, int count); + + /** + * Read next results from an existing cursor using the default batch size. + * + *

+ * This command is used to read the next batch of results from a cursor created by + * {@link #ftAggregate(Object, Object, AggregateArgs)} with the {@code WITHCURSOR} option. This variant uses the default + * batch size that was specified in the original {@code FT.AGGREGATE} command's {@code WITHCURSOR} clause. + *

+ * + *

+ * Cursors provide an efficient way to iterate through large result sets without loading all results into memory at once. + * When the cursor is exhausted (no more results), the returned {@link SearchReply} will have a cursor id of 0. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return the result of the cursor read command containing the next batch of results and potentially a new cursor id, see + * {@link SearchReply} + * @since 6.8 + * @see FT.CURSOR READ + * @see Cursor + * API + * @see SearchReply + * @see #ftAggregate(Object, Object, AggregateArgs) + */ + @Experimental + SearchReply ftCursorread(K index, long cursorId); + + /** + * Delete a cursor and free its associated resources. + * + *

+ * This command is used to explicitly delete a cursor created by {@link #ftAggregate(Object, Object, AggregateArgs)} with + * the {@code WITHCURSOR} option. Deleting a cursor frees up server resources and should be done when you no longer need to + * read more results from the cursor. + *

+ * + *

+ * Important: Cursors have a default timeout and will be automatically deleted by Redis if not accessed + * within the timeout period. However, it's good practice to explicitly delete cursors when you're finished with them to + * free up resources immediately. + *

+ * + *

+ * Once a cursor is deleted, any subsequent attempts to read from it using {@link #ftCursorread(Object, long)} or + * {@link #ftCursorread(Object, long, int)} will result in an error. + *

+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name, as a key + * @param cursorId the cursor id obtained from a previous {@code FT.AGGREGATE} or {@code FT.CURSOR READ} command + * @return {@code "OK"} if the cursor was successfully deleted + * @since 6.8 + * @see FT.CURSOR DEL + * @see Cursor + * API + * @see #ftAggregate(Object, Object, AggregateArgs) + * @see #ftCursorread(Object, long) + * @see #ftCursorread(Object, long, int) + */ + @Experimental + String ftCursordel(K index, long cursorId); + +} diff --git a/src/test/java/io/lettuce/apigenerator/Constants.java b/src/test/java/io/lettuce/apigenerator/Constants.java index 1d2213717..7cee3129a 100644 --- a/src/test/java/io/lettuce/apigenerator/Constants.java +++ b/src/test/java/io/lettuce/apigenerator/Constants.java @@ -31,7 +31,7 @@ class Constants { "RedisGeoCommands", "RedisHashCommands", "RedisHLLCommands", "RedisKeyCommands", "RedisListCommands", "RedisScriptingCommands", "RedisSentinelCommands", "RedisServerCommands", "RedisSetCommands", "RedisSortedSetCommands", "RedisStreamCommands", "RedisStringCommands", "RedisTransactionalCommands", - "RedisJsonCommands", "RedisVectorSetCommands" }; + "RedisJsonCommands", "RedisVectorSetCommands", "RediSearchCommands" }; public static final File TEMPLATES = new File("src/main/templates"); diff --git a/src/test/java/io/lettuce/core/RediSearchCommandBuilderUnitTests.java b/src/test/java/io/lettuce/core/RediSearchCommandBuilderUnitTests.java new file mode 100644 index 000000000..fa0ecf3e5 --- /dev/null +++ b/src/test/java/io/lettuce/core/RediSearchCommandBuilderUnitTests.java @@ -0,0 +1,327 @@ +package io.lettuce.core; + +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +import static io.lettuce.core.protocol.CommandType.FT_CURSOR; +import static io.lettuce.core.search.arguments.AggregateArgs.*; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.Command; +import io.lettuce.core.search.SearchReply; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.NumericFieldArgs; +import io.lettuce.core.search.arguments.QueryDialects; +import io.lettuce.core.search.arguments.SearchArgs; +import io.lettuce.core.search.arguments.TagFieldArgs; +import io.lettuce.core.search.arguments.TextFieldArgs; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.Arrays; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Unit tests for {@link RediSearchCommandBuilder}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class RediSearchCommandBuilderUnitTests { + + private static final String MY_KEY = "idx"; + + private static final String MY_QUERY = "*"; + + private static final String FIELD1_NAME = "title"; + + private static final String FIELD2_NAME = "published_at"; + + private static final String FIELD3_NAME = "category"; + + private static final String FIELD4_NAME = "sku"; + + private static final String FIELD4_ALIAS1 = "sku_text"; + + private static final String FIELD4_ALIAS2 = "sku_tag"; + + private static final String PREFIX = "blog:post:"; + + RediSearchCommandBuilder builder = new RediSearchCommandBuilder<>(StringCodec.UTF8); + + // FT.CREATE idx ON HASH PREFIX 1 blog:post: SCHEMA title TEXT SORTABLE published_at NUMERIC SORTABLE category TAG SORTABLE + @Test + void shouldCorrectlyConstructFtCreateCommandScenario1() { + FieldArgs fieldArgs1 = TextFieldArgs. builder().name(FIELD1_NAME).sortable().build(); + FieldArgs fieldArgs2 = NumericFieldArgs. builder().name(FIELD2_NAME).sortable().build(); + FieldArgs fieldArgs3 = TagFieldArgs. builder().name(FIELD3_NAME).sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + Command command = builder.ftCreate(MY_KEY, createArgs, + Arrays.asList(fieldArgs1, fieldArgs2, fieldArgs3)); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*17\r\n" // + + "$9\r\n" + "FT.CREATE\r\n" // + + "$3\r\n" + MY_KEY + "\r\n" // + + "$2\r\n" + "ON\r\n" // + + "$4\r\n" + "HASH\r\n" // + + "$6\r\n" + "PREFIX\r\n" // + + "$1\r\n" + "1\r\n" // + + "$10\r\n" + PREFIX + "\r\n" // + + "$6\r\n" + "SCHEMA\r\n" // + + "$5\r\n" + FIELD1_NAME + "\r\n" // + + "$4\r\n" + "TEXT\r\n" // + + "$8\r\n" + "SORTABLE\r\n" // + + "$12\r\n" + FIELD2_NAME + "\r\n" // + + "$7\r\n" + "NUMERIC\r\n" // + + "$8\r\n" + "SORTABLE\r\n" // + + "$8\r\n" + FIELD3_NAME + "\r\n" // + + "$3\r\n" + "TAG\r\n" // + + "$8\r\n" + "SORTABLE\r\n"; // + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + // FT.CREATE idx ON HASH PREFIX 1 blog:post: SCHEMA sku AS sku_text TEXT sku AS sku_tag TAG SORTABLE + @Test + void shouldCorrectlyConstructFtCreateCommandScenario2() { + FieldArgs fieldArgs1 = TextFieldArgs. builder().name(FIELD4_NAME).as(FIELD4_ALIAS1).build(); + FieldArgs fieldArgs2 = TagFieldArgs. builder().name(FIELD4_NAME).as(FIELD4_ALIAS2).sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + Command command = builder.ftCreate(MY_KEY, createArgs, Arrays.asList(fieldArgs1, fieldArgs2)); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*17\r\n" // + + "$9\r\n" + "FT.CREATE\r\n" // + + "$3\r\n" + MY_KEY + "\r\n" // + + "$2\r\n" + "ON\r\n" // + + "$4\r\n" + "HASH\r\n" // + + "$6\r\n" + "PREFIX\r\n" // + + "$1\r\n" + "1\r\n" // + + "$10\r\n" + PREFIX + "\r\n" // + + "$6\r\n" + "SCHEMA\r\n" // + + "$3\r\n" + FIELD4_NAME + "\r\n" // + + "$2\r\n" + "AS\r\n" // + + "$8\r\n" + FIELD4_ALIAS1 + "\r\n" // + + "$4\r\n" + "TEXT\r\n" // + + "$3\r\n" + FIELD4_NAME + "\r\n" // + + "$2\r\n" + "AS\r\n" // + + "$7\r\n" + FIELD4_ALIAS2 + "\r\n" // + + "$3\r\n" + "TAG\r\n" // + + "$8\r\n" + "SORTABLE\r\n"; + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtDropindexCommand() { + Command command = builder.ftDropindex(MY_KEY, false); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*2\r\n" // + + "$12\r\n" + "FT.DROPINDEX\r\n" // + + "$3\r\n" + MY_KEY + "\r\n"; + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtDropindexCommandDd() { + Command command = builder.ftDropindex(MY_KEY, true); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*3\r\n" // + + "$12\r\n" + "FT.DROPINDEX\r\n" // + + "$3\r\n" + MY_KEY + "\r\n" // + + "$2\r\n" + "DD\r\n"; + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtSearchCommandNoSearchArgs() { + Command> command = builder.ftSearch(MY_KEY, MY_QUERY, + SearchArgs. builder().build()); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*5\r\n" + "$9\r\n" + "FT.SEARCH\r\n" // + + "$3\r\n" + MY_KEY + "\r\n" // + + "$1\r\n" + MY_QUERY + "\r\n" // + + "$7\r\n" + "DIALECT\r\n" // + + "$1\r\n" + "2\r\n"; + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtSearchCommandLimit() { + + SearchArgs searchArgs = SearchArgs. builder().limit(10, 10).returnField("title") + .build(); + + Command> command = builder.ftSearch(MY_KEY, MY_QUERY, searchArgs); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*11\r\n" // + + "$9\r\n" + "FT.SEARCH\r\n" // + + "$3\r\n" + MY_KEY + "\r\n" // + + "$1\r\n" + MY_QUERY + "\r\n" // + + "$6\r\nRETURN\r\n" // + + "$1\r\n" + "1\r\n" // + + "$5\r\n" + "title\r\n" // + + "$5\r\nLIMIT\r\n" // + + "$2\r\n10\r\n$2\r\n10\r\n" // + + "$7\r\nDIALECT\r\n" // + + "$1\r\n2\r\n"; + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtSearchCommandParams() { + + SearchArgs searchArgs = SearchArgs. builder() + .param("poly", "POLYGON((2 2, 2 50, 50 50, 50 2, 2 2))").build(); + + Command> command = builder.ftSearch(MY_KEY, MY_QUERY, searchArgs); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*9\r\n" // + + "$9\r\n" + "FT.SEARCH\r\n" // + + "$3\r\n" + MY_KEY + "\r\n" // + + "$1\r\n" + MY_QUERY + "\r\n" // + + "$6\r\nPARAMS\r\n" // + + "$1\r\n" + "2\r\n" // + + "$4\r\n" + "poly\r\n" // + + "$38\r\n" + "POLYGON((2 2, 2 50, 50 50, 50 2, 2 2))\r\n" // + + "$7\r\nDIALECT\r\n" // + + "$1\r\n2\r\n"; + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtAggregateCommandBasic() { + Command> command = builder.ftAggregate(MY_KEY, MY_QUERY, null); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*3\r\n" // + + "$12\r\n" + "FT.AGGREGATE\r\n" // + + "$3\r\n" + MY_KEY + "\r\n" // + + "$1\r\n" + MY_QUERY + "\r\n"; + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtAggregateCommandWithArgs() { + AggregateArgs aggregateArgs = AggregateArgs. builder()// + .verbatim()// + .load("title")// + .groupBy(GroupBy. of("category").reduce(Reducer. count().as("count")))// + .sortBy(SortBy.of("count", SortDirection.DESC))// + .apply(Apply.of("@title", "title_upper"))// + .limit(0, 10)// + .filter("@category:{$category}")// + .withCursor(WithCursor.of(10L, Duration.ofSeconds(10)))// + .param("category", "electronics")// + .scorer("TFIDF")// + .addScores()// + .dialect(QueryDialects.DIALECT2) // + .build(); + + Command> command = builder.ftAggregate(MY_KEY, MY_QUERY, aggregateArgs); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*42\r\n" + "$12\r\n" + "FT.AGGREGATE\r\n" + "$3\r\n" + "idx\r\n" + "$1\r\n" + "*\r\n"// + + "$8\r\n" + "VERBATIM\r\n"// + + "$4\r\n" + "LOAD\r\n" + "$1\r\n" + "1\r\n" + "$5\r\n" + "title\r\n"// + + "$7\r\n" + "GROUPBY\r\n" + "$1\r\n" + "1\r\n" + "$9\r\n" + "@category\r\n"// + + "$6\r\n" + "REDUCE\r\n" + "$5\r\n" + "COUNT\r\n" + "$1\r\n" + "0\r\n" + "$2\r\n" + "AS\r\n" + "$5\r\n" + + "count\r\n"// + + "$6\r\n" + "SORTBY\r\n" + "$1\r\n" + "2\r\n" + "$6\r\n" + "@count\r\n" + "$4\r\n" + "DESC\r\n"// + + "$5\r\n" + "APPLY\r\n" + "$6\r\n" + "@title\r\n" + "$2\r\n" + "AS\r\n" + "$11\r\n" + "title_upper\r\n"// + + "$5\r\n" + "LIMIT\r\n" + "$1\r\n" + "0\r\n" + "$2\r\n" + "10\r\n"// + + "$6\r\n" + "FILTER\r\n" + "$21\r\n" + "@category:{$category}\r\n"// + + "$10\r\n" + "WITHCURSOR\r\n" + "$5\r\n" + "COUNT\r\n" + "$2\r\n" + "10\r\n" + "$7\r\n" + "MAXIDLE\r\n" + + "$5\r\n" + "10000\r\n"// + + "$6\r\n" + "PARAMS\r\n" + "$1\r\n" + "2\r\n" + "$8\r\n" + "category\r\n" + "$11\r\n" + "electronics\r\n"// + + "$6\r\n" + "SCORER\r\n" + "$5\r\n" + "TFIDF\r\n"// + + "$9\r\n" + "ADDSCORES\r\n"// + + "$7\r\n" + "DIALECT\r\n" + "$1\r\n2\r\n";// + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtCursorreadCommandWithCount() { + Command> command = builder.ftCursorread("idx", 123L, 10); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*6\r\n" // + + "$9\r\n" + "FT.CURSOR\r\n" + "$4\r\n" + "READ\r\n" // + + "$3\r\n" + "idx\r\n" // + + "$3\r\n" + "123\r\n" // + + "$5\r\n" + "COUNT\r\n" // + + "$2\r\n" + "10\r\n"; + + assertThat(command.getType()).isEqualTo(FT_CURSOR); + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtCursorreadCommandWithoutCount() { + Command> command = builder.ftCursorread("idx", 456L); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*4\r\n" // + + "$9\r\n" + "FT.CURSOR\r\n" + "$4\r\n" + "READ\r\n" // + + "$3\r\n" + "idx\r\n" // + + "$3\r\n" + "456\r\n"; + + assertThat(command.getType()).isEqualTo(FT_CURSOR); + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + + @Test + void shouldCorrectlyConstructFtCursordelCommand() { + Command command = builder.ftCursordel("idx", 123L); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + String result = "*4\r\n" // + + "$9\r\n" + "FT.CURSOR\r\n" + "$3\r\n" + "DEL\r\n" // + + "$3\r\n" + "idx\r\n" // + + "$3\r\n" + "123\r\n"; + + assertThat(command.getType()).isEqualTo(FT_CURSOR); + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + +} diff --git a/src/test/java/io/lettuce/core/cluster/AsyncConnectionProviderIntegrationTests.java b/src/test/java/io/lettuce/core/cluster/AsyncConnectionProviderIntegrationTests.java index 712d5af9e..966d0a052 100644 --- a/src/test/java/io/lettuce/core/cluster/AsyncConnectionProviderIntegrationTests.java +++ b/src/test/java/io/lettuce/core/cluster/AsyncConnectionProviderIntegrationTests.java @@ -23,6 +23,7 @@ import static org.assertj.core.api.Assertions.*; import java.io.IOException; +import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; @@ -151,12 +152,12 @@ void connectShouldFail() throws Exception { StopWatch stopWatch = new StopWatch(); assertThatThrownBy(() -> TestFutures.awaitOrTimeout(sut.getConnection(connectionKey))) - .hasCauseInstanceOf(ConnectTimeoutException.class); + .hasRootCauseInstanceOf(ConnectException.class); stopWatch.start(); assertThatThrownBy(() -> TestFutures.awaitOrTimeout(sut.getConnection(connectionKey))) - .hasCauseInstanceOf(ConnectTimeoutException.class); + .hasRootCauseInstanceOf(ConnectException.class); stopWatch.stop(); diff --git a/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsIntegrationTests.java new file mode 100644 index 000000000..107b3dacf --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsIntegrationTests.java @@ -0,0 +1,914 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.RedisClient; +import io.lettuce.core.RedisURI; +import io.lettuce.core.api.sync.RedisCommands; +import io.lettuce.core.search.arguments.*; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static io.lettuce.TestTags.INTEGRATION_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Integration tests for Redis Search advanced concepts based on the Redis documentation. + *

+ * These tests cover advanced Redis Search features including: - Stop words management and customization - Text tokenization and + * character escaping - Sorting by indexed fields with normalization options - Tag field operations with custom separators and + * case sensitivity - Text highlighting and summarization - Document scoring functions and algorithms - Language-specific + * stemming and verbatim search + *

+ * Based on the following Redis + * documentation + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchAdvancedConceptsIntegrationTests { + + // Index names + private static final String STOPWORDS_INDEX = "stopwords-idx"; + + private static final String TOKENIZATION_INDEX = "tokenization-idx"; + + private static final String SORTING_INDEX = "sorting-idx"; + + private static final String TAGS_INDEX = "tags-idx"; + + private static final String HIGHLIGHT_INDEX = "highlight-idx"; + + private static final String SCORING_INDEX = "scoring-idx"; + + private static final String STEMMING_INDEX = "stemming-idx"; + + // Key prefixes + private static final String ARTICLE_PREFIX = "article:"; + + private static final String DOCUMENT_PREFIX = "doc:"; + + private static final String USER_PREFIX = "user:"; + + private static final String PRODUCT_PREFIX = "product:"; + + private static final String BOOK_PREFIX = "book:"; + + private static final String REVIEW_PREFIX = "review:"; + + private static final String WORD_PREFIX = "word:"; + + protected static RedisClient client; + + protected static RedisCommands redis; + + public RediSearchAdvancedConceptsIntegrationTests() { + RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build(); + client = RedisClient.create(redisURI); + client.setOptions(getOptions()); + redis = client.connect().sync(); + } + + protected ClientOptions getOptions() { + return ClientOptions.builder().build(); + } + + @BeforeEach + public void prepare() { + redis.flushall(); + } + + @AfterAll + static void teardown() { + if (client != null) { + client.shutdown(); + } + } + + /** + * Test stop words functionality including custom stop words and disabling stop words. Based on the following + * Redis + * documentation + */ + @Test + void testStopWordsManagement() { + // Test 1: Create index with custom stop words + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs contentField = TextFieldArgs. builder().name("content").build(); + + CreateArgs customStopWordsArgs = CreateArgs. builder().addPrefix(ARTICLE_PREFIX) + .on(CreateArgs.TargetType.HASH).stopWords(Arrays.asList("foo", "bar", "baz")).build(); + + redis.ftCreate(STOPWORDS_INDEX, customStopWordsArgs, Arrays.asList(titleField, contentField)); + + // Add test documents + Map article1 = new HashMap<>(); + article1.put("title", "The foo and bar guide"); + article1.put("content", "This is a comprehensive guide about foo and bar concepts"); + redis.hmset("article:1", article1); + + Map article2 = new HashMap<>(); + article2.put("title", "Advanced baz techniques"); + article2.put("content", "Learn advanced baz programming techniques and best practices"); + redis.hmset("article:2", article2); + + // Test that custom stop words are ignored in search + SearchReply results = redis.ftSearch(STOPWORDS_INDEX, "foo"); + assertThat(results.getCount()).isEqualTo(0); // "foo" should be ignored as stop word + + results = redis.ftSearch(STOPWORDS_INDEX, "guide"); + assertThat(results.getCount()).isEqualTo(1); // "guide" is not a stop word + + results = redis.ftSearch(STOPWORDS_INDEX, "comprehensive"); + assertThat(results.getCount()).isEqualTo(1); // "comprehensive" is not a stop word + + // Test NOSTOPWORDS option to bypass stop word filtering + + // FIXME DISABLED - not working on the server + + // SearchArgs noStopWordsArgs = SearchArgs.builder().noStopWords().build(); + // results = redis.ftSearch(STOPWORDS_INDEX, "foo", noStopWordsArgs); + // assertThat(results.getCount()).isEqualTo(1); // "foo" should be found when stop words are disabled + + // Cleanup + redis.ftDropindex(STOPWORDS_INDEX); + } + + /** + * Test text tokenization and character escaping. Based on the following + * Redis + * documentation + */ + @Test + void testTokenizationAndEscaping() { + // Create index for testing tokenization + FieldArgs textField = TextFieldArgs. builder().name("text").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(DOCUMENT_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(TOKENIZATION_INDEX, createArgs, Collections.singletonList(textField)); + + // Add documents with various punctuation and special characters + Map doc1 = new HashMap<>(); + doc1.put("text", "hello-world foo.bar baz_qux"); + redis.hmset("doc:1", doc1); + + Map doc2 = new HashMap<>(); + doc2.put("text", "hello\\-world test@example.com"); + redis.hmset("doc:2", doc2); + + Map doc3 = new HashMap<>(); + doc3.put("text", "version-2.0 price$19.99 email@domain.org"); + redis.hmset("doc:3", doc3); + + // Test 1: Punctuation marks separate tokens + SearchReply results = redis.ftSearch(TOKENIZATION_INDEX, "hello"); + // FIXME seems that doc:2 is created with hello\\-world instead of hello\-world + assertThat(results.getCount()).isEqualTo(1); // Both "hello-world" and "hello\\-world" + + results = redis.ftSearch(TOKENIZATION_INDEX, "world"); + assertThat(results.getCount()).isEqualTo(1); // Only "hello-world" (not escaped) + + // Test 2: Underscores are not separators + results = redis.ftSearch(TOKENIZATION_INDEX, "baz_qux"); + assertThat(results.getCount()).isEqualTo(1); // Underscore keeps the token together + + // Test 3: Email addresses are tokenized by punctuation + results = redis.ftSearch(TOKENIZATION_INDEX, "test"); + assertThat(results.getCount()).isEqualTo(1); + + results = redis.ftSearch(TOKENIZATION_INDEX, "example"); + assertThat(results.getCount()).isEqualTo(1); + + // Test 4: Numbers with punctuation + results = redis.ftSearch(TOKENIZATION_INDEX, "2"); + assertThat(results.getCount()).isEqualTo(1); // From "version-2.0" + + results = redis.ftSearch(TOKENIZATION_INDEX, "19"); + assertThat(results.getCount()).isEqualTo(1); // From "price$19.99" + + // Cleanup + redis.ftDropindex(TOKENIZATION_INDEX); + } + + /** + * Test sorting by indexed fields with normalization options. Based on the following + * Redis + * documentation + */ + @Test + void testSortingByIndexedFields() { + // Create index with sortable fields + FieldArgs firstNameField = TextFieldArgs. builder().name("first_name").sortable().build(); + FieldArgs lastNameField = TextFieldArgs. builder().name("last_name").sortable().build(); + FieldArgs ageField = NumericFieldArgs. builder().name("age").sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(USER_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(SORTING_INDEX, createArgs, Arrays.asList(firstNameField, lastNameField, ageField)); + + // Add sample users + Map user1 = new HashMap<>(); + user1.put("first_name", "alice"); + user1.put("last_name", "jones"); + user1.put("age", "35"); + redis.hmset("user:1", user1); + + Map user2 = new HashMap<>(); + user2.put("first_name", "bob"); + user2.put("last_name", "jones"); + user2.put("age", "36"); + redis.hmset("user:2", user2); + + Map user3 = new HashMap<>(); + user3.put("first_name", "Alice"); + user3.put("last_name", "Smith"); + user3.put("age", "28"); + redis.hmset("user:3", user3); + + // Test 1: Sort by first name descending + SortByArgs sortByFirstName = SortByArgs. builder().attribute("first_name").descending().build(); + SearchArgs sortArgs = SearchArgs. builder().sortBy(sortByFirstName).build(); + SearchReply results = redis.ftSearch(SORTING_INDEX, "@last_name:jones", sortArgs); + + assertThat(results.getCount()).isEqualTo(2); + assertThat(results.getResults()).hasSize(2); + // Due to normalization, "bob" comes before "alice" in descending order + assertThat(results.getResults().get(0).getFields().get("first_name")).isEqualTo("bob"); + assertThat(results.getResults().get(1).getFields().get("first_name")).isEqualTo("alice"); + + // Test 2: Sort by age ascending + SortByArgs sortByAge = SortByArgs. builder().attribute("age").build(); + SearchArgs ageSort = SearchArgs. builder().sortBy(sortByAge).build(); + results = redis.ftSearch(SORTING_INDEX, "*", ageSort); + + assertThat(results.getCount()).isEqualTo(3); + assertThat(results.getResults()).hasSize(3); + // Verify age sorting: 28, 35, 36 + assertThat(results.getResults().get(0).getFields().get("age")).isEqualTo("28"); + assertThat(results.getResults().get(1).getFields().get("age")).isEqualTo("35"); + assertThat(results.getResults().get(2).getFields().get("age")).isEqualTo("36"); + + // Cleanup + redis.ftDropindex(SORTING_INDEX); + } + + /** + * Test tag field operations with custom separators and case sensitivity. Based on the following + * Redis documentation + */ + @Test + void testTagFieldOperations() { + // Create index with tag fields using custom separator and case sensitivity + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs categoriesField = TagFieldArgs. builder().name("categories").separator(";").build(); + FieldArgs tagsField = TagFieldArgs. builder().name("tags").caseSensitive().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(PRODUCT_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(TAGS_INDEX, createArgs, Arrays.asList(titleField, categoriesField, tagsField)); + + // Add sample products + Map product1 = new HashMap<>(); + product1.put("title", "Gaming Laptop"); + product1.put("categories", "electronics;computers;gaming"); + product1.put("tags", "High-Performance,RGB,Gaming"); + redis.hmset("product:1", product1); + + Map product2 = new HashMap<>(); + product2.put("title", "Office Laptop"); + product2.put("categories", "electronics;computers;business"); + product2.put("tags", "Business,Productivity,high-performance"); + redis.hmset("product:2", product2); + + Map product3 = new HashMap<>(); + product3.put("title", "Gaming Mouse"); + product3.put("categories", "electronics;gaming;accessories"); + product3.put("tags", "RGB,Wireless,gaming"); + redis.hmset("product:3", product3); + + // Test 1: Search by category with custom separator + SearchReply results = redis.ftSearch(TAGS_INDEX, "@categories:{gaming}"); + assertThat(results.getCount()).isEqualTo(2); // Gaming laptop and mouse + + results = redis.ftSearch(TAGS_INDEX, "@categories:{computers}"); + assertThat(results.getCount()).isEqualTo(2); // Both laptops + + // Test 2: Multiple tags in single filter (OR operation) + results = redis.ftSearch(TAGS_INDEX, "@categories:{business|accessories}"); + assertThat(results.getCount()).isEqualTo(2); // Office laptop and gaming mouse + + // Test 3: Multiple tag filters (AND operation) + results = redis.ftSearch(TAGS_INDEX, "@categories:{electronics} @categories:{gaming}"); + assertThat(results.getCount()).isEqualTo(2); // Gaming laptop and mouse + + // Test 4: Case sensitivity in tags + results = redis.ftSearch(TAGS_INDEX, "@tags:{RGB}"); + assertThat(results.getCount()).isEqualTo(2); // Gaming laptop and mouse (exact case match) + + results = redis.ftSearch(TAGS_INDEX, "@tags:{rgb}"); + assertThat(results.getCount()).isEqualTo(0); // No match due to case sensitivity + + // Test 5: Prefix matching with tags + results = redis.ftSearch(TAGS_INDEX, "@tags:{High*}"); + assertThat(results.getCount()).isEqualTo(1); // Gaming laptop with "High-Performance" + + results = redis.ftSearch(TAGS_INDEX, "@tags:{high*}"); + assertThat(results.getCount()).isEqualTo(1); // Office laptop with "high-performance" + + // Test 6: Tag with punctuation (hyphen) + results = redis.ftSearch(TAGS_INDEX, "@tags:{High\\-Performance}"); + assertThat(results.getCount()).isEqualTo(1); // Gaming laptop + + // Cleanup + redis.ftDropindex(TAGS_INDEX); + } + + /** + * Test text highlighting and summarization features. Based on the following + * Redis + * documentation + */ + @Test + void testHighlightingAndSummarization() { + // Create index for highlighting tests + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs contentField = TextFieldArgs. builder().name("content").build(); + FieldArgs authorField = TextFieldArgs. builder().name("author").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(BOOK_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(HIGHLIGHT_INDEX, createArgs, Arrays.asList(titleField, contentField, authorField)); + + // Add sample books with longer content for summarization + Map book1 = new HashMap<>(); + book1.put("title", "Redis in Action"); + book1.put("content", + "Redis is an open-source, in-memory data structure store used as a database, cache, and message broker. " + + "Redis provides data structures such as strings, hashes, lists, sets, sorted sets with range queries, bitmaps, " + + "hyperloglogs, geospatial indexes, and streams. Redis has built-in replication, Lua scripting, LRU eviction, " + + "transactions, and different levels of on-disk persistence, and provides high availability via Redis Sentinel " + + "and automatic partitioning with Redis Cluster."); + book1.put("author", "Josiah Carlson"); + redis.hmset("book:1", book1); + + Map book2 = new HashMap<>(); + book2.put("title", "Database Design Patterns"); + book2.put("content", + "Database design patterns are reusable solutions to commonly occurring problems in database design. " + + "These patterns help developers create efficient, scalable, and maintainable database schemas. Common patterns " + + "include normalization, denormalization, partitioning, sharding, and indexing strategies. Understanding these " + + "patterns is crucial for building high-performance applications that can handle large amounts of data."); + book2.put("author", "Jane Smith"); + redis.hmset("book:2", book2); + + // Test 1: Basic highlighting with default tags + HighlightArgs basicHighlight = HighlightArgs. builder().build(); + SearchArgs highlightArgs = SearchArgs. builder().highlight(basicHighlight).build(); + + SearchReply results = redis.ftSearch(HIGHLIGHT_INDEX, "Redis", highlightArgs); + assertThat(results.getCount()).isEqualTo(1); + + // Check that highlighting tags are present in the content + String highlightedContent = results.getResults().get(0).getFields().get("content"); + assertThat(highlightedContent).contains("Redis"); // Default highlighting tags + + // Test 2: Custom highlighting tags + HighlightArgs customHighlight = HighlightArgs. builder().field("title").field("content") + .tags("", "").build(); + SearchArgs customHighlightArgs = SearchArgs. builder().highlight(customHighlight) + .build(); + + results = redis.ftSearch(HIGHLIGHT_INDEX, "database", customHighlightArgs); + assertThat(results.getCount()).isEqualTo(2); + + // Check custom highlighting tags + for (SearchReply.SearchResult result : results.getResults()) { + String content = result.getFields().get("content"); + if (content.contains("database")) { + assertThat(content).contains("database"); + } + } + + // Test 3: Summarization with custom parameters + SummarizeArgs summarize = SummarizeArgs. builder().field("content").fragments(2).len(25) + .separator(" ... ").build(); + SearchArgs summarizeArgs = SearchArgs. builder().summarize(summarize).build(); + + results = redis.ftSearch(HIGHLIGHT_INDEX, "patterns", summarizeArgs); + assertThat(results.getCount()).isEqualTo(1); + + // Check that content is summarized + String summarizedContent = results.getResults().get(0).getFields().get("content"); + assertThat(summarizedContent).contains(" ... "); // Custom separator + assertThat(summarizedContent.length()).isLessThan(book2.get("content").length()); // Should be shorter + + // Test 4: Combined highlighting and summarization + HighlightArgs combineHighlight = HighlightArgs. builder().field("content") + .tags("**", "**").build(); + SummarizeArgs combineSummarize = SummarizeArgs. builder().field("content").fragments(1) + .len(30).build(); + SearchArgs combinedArgs = SearchArgs. builder().highlight(combineHighlight) + .summarize(combineSummarize).build(); + + results = redis.ftSearch(HIGHLIGHT_INDEX, "Redis data", combinedArgs); + assertThat(results.getCount()).isEqualTo(1); + + String combinedContent = results.getResults().get(0).getFields().get("content"); + assertThat(combinedContent).contains("**"); // Highlighting markers + assertThat(combinedContent).contains("..."); // Default summarization separator + + // Cleanup + redis.ftDropindex(HIGHLIGHT_INDEX); + } + + /** + * Test document scoring functions and algorithms. Based on the following + * Redis + * documentation + */ + @Test + void testDocumentScoring() { + // Create index for scoring tests + TextFieldArgs titleField = TextFieldArgs. builder().name("title").weight(2).build(); + TextFieldArgs contentField = TextFieldArgs. builder().name("content").build(); + NumericFieldArgs ratingField = NumericFieldArgs. builder().name("rating").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(REVIEW_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(SCORING_INDEX, createArgs, Arrays.asList(titleField, contentField, ratingField)); + + // Add sample reviews with varying relevance + Map review1 = new HashMap<>(); + review1.put("title", "Excellent Redis Tutorial"); + review1.put("content", "This Redis tutorial is excellent and comprehensive. Redis is amazing for caching."); + review1.put("rating", "5"); + redis.hmset("review:1", review1); + + Map review2 = new HashMap<>(); + review2.put("title", "Good Database Guide"); + review2.put("content", + "A good guide about databases. Mentions Redis briefly in one chapter. Redis mentioned as a good choice for caching. No other mentions of Redis."); + review2.put("rating", "4"); + redis.hmset("review:2", review2); + + Map review3 = new HashMap<>(); + review3.put("title", "Redis Performance Tips"); + review3.put("content", "Performance optimization tips for Redis. Very detailed Redis configuration guide."); + review3.put("rating", "5"); + redis.hmset("review:3", review3); + + // Test 1: Default BM25 scoring with scores + SearchArgs withScores = SearchArgs. builder().withScores().build(); + SearchReply results = redis.ftSearch(SCORING_INDEX, "Redis", withScores); + + assertThat(results.getCount()).isEqualTo(3); + assertThat(results.getResults()).hasSize(3); + + // Verify scores are present and ordered (higher scores first) + double previousScore = Double.MAX_VALUE; + for (SearchReply.SearchResult result : results.getResults()) { + assertThat(result.getScore()).isNotNull(); + assertThat(result.getScore()).isLessThanOrEqualTo(previousScore); + previousScore = result.getScore(); + } + + // Test 2: TFIDF scoring + SearchArgs tfidfScoring = SearchArgs. builder().withScores() + .scorer(ScoringFunction.TF_IDF).build(); + results = redis.ftSearch(SCORING_INDEX, "Redis guide", tfidfScoring); + + assertThat(results.getCount()).isEqualTo(2); + // Review 3 should score highest due to "Redis" and "guide" having the shortest distance + assertThat(results.getResults().get(0).getId()).isEqualTo("review:3"); + + // Test 3: DISMAX scoring + SearchArgs dismaxScoring = SearchArgs. builder().withScores() + .scorer(ScoringFunction.DIS_MAX).build(); + results = redis.ftSearch(SCORING_INDEX, "Redis guide", dismaxScoring); + + assertThat(results.getCount()).isEqualTo(2); + // Review 2 should score highest due to having the most mentions of both search terms + assertThat(results.getResults().get(0).getId()).isEqualTo("review:2"); + + // Test 4: DOCSCORE scoring (uses document's inherent score) + SearchArgs docScoring = SearchArgs. builder().withScores() + .scorer(ScoringFunction.DOCUMENT_SCORE).build(); + results = redis.ftSearch(SCORING_INDEX, "*", docScoring); + + assertThat(results.getCount()).isEqualTo(3); + assertThat(results.getResults()).hasSize(3); + + // Cleanup + redis.ftDropindex(SCORING_INDEX); + } + + /** + * Test language-specific stemming and verbatim search. Based on the following + * Redis + * documentation + */ + @Test + void testStemmingAndLanguageSupport() { + // Test 1: English stemming + FieldArgs englishWordField = TextFieldArgs. builder().name("word").build(); + + CreateArgs englishArgs = CreateArgs. builder().addPrefix(WORD_PREFIX) + .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); + + redis.ftCreate(STEMMING_INDEX, englishArgs, Collections.singletonList(englishWordField)); + + // Add words with different forms + Map word1 = new HashMap<>(); + word1.put("word", "running"); + redis.hmset("word:1", word1); + + Map word2 = new HashMap<>(); + word2.put("word", "runs"); + redis.hmset("word:2", word2); + + Map word3 = new HashMap<>(); + word3.put("word", "runner"); + redis.hmset("word:3", word3); + + Map word4 = new HashMap<>(); + word4.put("word", "run"); + redis.hmset("word:4", word4); + + // Test stemming: searching for "run" should find all variations + // FIXME Seems like a bug in the server, "runner" needs to also be stemmed, but it is not + SearchReply results = redis.ftSearch(STEMMING_INDEX, "run"); + assertThat(results.getCount()).isEqualTo(3); // All forms should be found due to stemming + + // Test stemming: searching for "running" should also find all variations + // FIXME Seems like a bug in the server, "runner" needs to also be stemmed, but it is not + results = redis.ftSearch(STEMMING_INDEX, "running"); + assertThat(results.getCount()).isEqualTo(3); + + // Test VERBATIM search (disable stemming) + SearchArgs verbatimArgs = SearchArgs. builder().verbatim().build(); + results = redis.ftSearch(STEMMING_INDEX, "run", verbatimArgs); + assertThat(results.getCount()).isEqualTo(1); // Only exact match + + results = redis.ftSearch(STEMMING_INDEX, "running", verbatimArgs); + assertThat(results.getCount()).isEqualTo(1); // Only exact match + + // Test with language parameter in search (should override index language) + SearchArgs languageArgs = SearchArgs. builder().language(DocumentLanguage.GERMAN) + .build(); + results = redis.ftSearch(STEMMING_INDEX, "run", languageArgs); + // German stemming rules would be different, but for this test we just verify it works + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + // Cleanup + redis.ftDropindex(STEMMING_INDEX); + + // Test 2: German stemming example from documentation + FieldArgs germanWordField = TextFieldArgs. builder().name("wort").build(); + + CreateArgs germanArgs = CreateArgs. builder().addPrefix("wort:") + .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.GERMAN).build(); + + redis.ftCreate("idx:german", germanArgs, Collections.singletonList(germanWordField)); + + // Add German words with same stem: stück, stücke, stuck, stucke => stuck + redis.hset("wort:1", "wort", "stück"); + redis.hset("wort:2", "wort", "stücke"); + redis.hset("wort:3", "wort", "stuck"); + redis.hset("wort:4", "wort", "stucke"); + + // Search for "stuck" should find all variations due to German stemming + results = redis.ftSearch("idx:german", "@wort:(stuck)"); + assertThat(results.getCount()).isEqualTo(4); + + // Cleanup + redis.ftDropindex("idx:german"); + } + + /** + * Test TextFieldArgs phonetic matcher options for different languages. Based on Redis documentation for phonetic matching + * capabilities that enable fuzzy search based on pronunciation similarity. + */ + @Test + void testPhoneticMatchers() { + // Test 1: English phonetic matching + FieldArgs englishNameField = TextFieldArgs. builder().name("name") + .phonetic(TextFieldArgs.PhoneticMatcher.ENGLISH).build(); + + CreateArgs englishArgs = CreateArgs. builder().addPrefix("person:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("phonetic-en-idx", englishArgs, Collections.singletonList(englishNameField)); + + // Add names with similar pronunciation but different spelling + redis.hset("person:1", "name", "Smith"); + redis.hset("person:2", "name", "Smyth"); + redis.hset("person:3", "name", "Schmidt"); + redis.hset("person:4", "name", "Johnson"); + redis.hset("person:5", "name", "Jonson"); + + // Search for "Smith" should find phonetically similar names + SearchReply results = redis.ftSearch("phonetic-en-idx", "@name:Smith"); + assertThat(results.getCount()).isGreaterThanOrEqualTo(2); // Should find Smith and Smyth at minimum + + // Search for "Johnson" should find phonetically similar names + results = redis.ftSearch("phonetic-en-idx", "@name:Johnson"); + assertThat(results.getCount()).isGreaterThanOrEqualTo(2); // Should find Johnson and Jonson at minimum + + redis.ftDropindex("phonetic-en-idx"); + + // Test 2: French phonetic matching + FieldArgs frenchNameField = TextFieldArgs. builder().name("nom") + .phonetic(TextFieldArgs.PhoneticMatcher.FRENCH).build(); + + CreateArgs frenchArgs = CreateArgs. builder().addPrefix("personne:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("phonetic-fr-idx", frenchArgs, Collections.singletonList(frenchNameField)); + + // Add French names with similar pronunciation + redis.hset("personne:1", "nom", "Martin"); + redis.hset("personne:2", "nom", "Martain"); + redis.hset("personne:3", "nom", "Dupont"); + redis.hset("personne:4", "nom", "Dupond"); + + // Search should find phonetically similar French names + results = redis.ftSearch("phonetic-fr-idx", "@nom:Martin"); + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + results = redis.ftSearch("phonetic-fr-idx", "@nom:Dupont"); + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + redis.ftDropindex("phonetic-fr-idx"); + + // Test 3: Spanish phonetic matching + FieldArgs spanishNameField = TextFieldArgs. builder().name("nombre") + .phonetic(TextFieldArgs.PhoneticMatcher.SPANISH).build(); + + CreateArgs spanishArgs = CreateArgs. builder().addPrefix("persona:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("phonetic-es-idx", spanishArgs, Collections.singletonList(spanishNameField)); + + // Add Spanish names + redis.hset("persona:1", "nombre", "García"); + redis.hset("persona:2", "nombre", "Garcia"); + redis.hset("persona:3", "nombre", "Rodríguez"); + redis.hset("persona:4", "nombre", "Rodriguez"); + + // Search should handle accent variations + results = redis.ftSearch("phonetic-es-idx", "@nombre:Garcia"); + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + redis.ftDropindex("phonetic-es-idx"); + + // Test 4: Portuguese phonetic matching + FieldArgs portugueseNameField = TextFieldArgs. builder().name("nome") + .phonetic(TextFieldArgs.PhoneticMatcher.PORTUGUESE).build(); + + CreateArgs portugueseArgs = CreateArgs. builder().addPrefix("pessoa:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("phonetic-pt-idx", portugueseArgs, Collections.singletonList(portugueseNameField)); + + // Add Portuguese names + redis.hset("pessoa:1", "nome", "Silva"); + redis.hset("pessoa:2", "nome", "Silveira"); + redis.hset("pessoa:3", "nome", "Santos"); + redis.hset("pessoa:4", "nome", "Santtos"); + + // Search should find phonetically similar Portuguese names + results = redis.ftSearch("phonetic-pt-idx", "@nome:Silva"); + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + redis.ftDropindex("phonetic-pt-idx"); + } + + /** + * Test TextFieldArgs noStem option to disable stemming for specific fields. Demonstrates how to prevent automatic word + * stemming when exact word matching is required. + */ + @Test + void testNoStemmingOption() { + // Test 1: Field with stemming enabled (default) + FieldArgs stemmingField = TextFieldArgs. builder().name("content_stemmed").build(); + + CreateArgs stemmingArgs = CreateArgs. builder().addPrefix("stem:") + .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); + + redis.ftCreate("stemming-idx", stemmingArgs, Collections.singletonList(stemmingField)); + + // Add documents with different word forms + redis.hset("stem:1", "content_stemmed", "running quickly"); + redis.hset("stem:2", "content_stemmed", "runs fast"); + redis.hset("stem:3", "content_stemmed", "runner speed"); + + // Search for "run" should find all variations due to stemming + SearchReply results = redis.ftSearch("stemming-idx", "@content_stemmed:run"); + assertThat(results.getCount()).isGreaterThanOrEqualTo(2); // Should find "running" and "runs" + + redis.ftDropindex("stemming-idx"); + + // Test 2: Field with stemming disabled + FieldArgs noStemmingField = TextFieldArgs. builder().name("content_exact").noStem().build(); + + CreateArgs noStemmingArgs = CreateArgs. builder().addPrefix("nostem:") + .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); + + redis.ftCreate("nostemming-idx", noStemmingArgs, Collections.singletonList(noStemmingField)); + + // Add the same documents + redis.hset("nostem:1", "content_exact", "running quickly"); + redis.hset("nostem:2", "content_exact", "runs fast"); + redis.hset("nostem:3", "content_exact", "runner speed"); + redis.hset("nostem:4", "content_exact", "run now"); + + // Search for "run" should only find exact matches + results = redis.ftSearch("nostemming-idx", "@content_exact:run"); + assertThat(results.getCount()).isEqualTo(1); // Only "run now" + + // Search for "running" should only find exact matches + results = redis.ftSearch("nostemming-idx", "@content_exact:running"); + assertThat(results.getCount()).isEqualTo(1); // Only "running quickly" + + // Search for "runs" should only find exact matches + results = redis.ftSearch("nostemming-idx", "@content_exact:runs"); + assertThat(results.getCount()).isEqualTo(1); // Only "runs fast" + + redis.ftDropindex("nostemming-idx"); + + // Test 3: Mixed fields - one with stemming, one without + FieldArgs mixedStemField = TextFieldArgs. builder().name("stemmed_content").build(); + FieldArgs mixedNoStemField = TextFieldArgs. builder().name("exact_content").noStem().build(); + + CreateArgs mixedArgs = CreateArgs. builder().addPrefix("mixed:") + .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); + + redis.ftCreate("mixed-idx", mixedArgs, Arrays.asList(mixedStemField, mixedNoStemField)); + + // Add document with both fields + Map mixedDoc = new HashMap<>(); + mixedDoc.put("stemmed_content", "running marathon"); + mixedDoc.put("exact_content", "running marathon"); + redis.hmset("mixed:1", mixedDoc); + + // Search in stemmed field should find with "run" + results = redis.ftSearch("mixed-idx", "@stemmed_content:run"); + assertThat(results.getCount()).isEqualTo(1); + + // Search in exact field should not find with "run" + results = redis.ftSearch("mixed-idx", "@exact_content:run"); + assertThat(results.getCount()).isEqualTo(0); + + // Search in exact field should find with "running" + results = redis.ftSearch("mixed-idx", "@exact_content:running"); + assertThat(results.getCount()).isEqualTo(1); + + redis.ftDropindex("mixed-idx"); + } + + /** + * Test TextFieldArgs withSuffixTrie option for efficient prefix and suffix matching. Demonstrates how suffix tries enable + * fast wildcard searches and autocomplete functionality. + */ + @Test + void testWithSuffixTrieOption() { + // Test 1: Field without suffix trie (default) + FieldArgs normalField = TextFieldArgs. builder().name("title").build(); + + CreateArgs normalArgs = CreateArgs. builder().addPrefix("normal:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("normal-idx", normalArgs, Collections.singletonList(normalField)); + + // Add test documents + redis.hset("normal:1", "title", "JavaScript Programming"); + redis.hset("normal:2", "title", "Java Development"); + redis.hset("normal:3", "title", "Python Scripting"); + redis.hset("normal:4", "title", "Programming Languages"); + + // Basic search should work + SearchReply results = redis.ftSearch("normal-idx", "@title:Java*"); + assertThat(results.getCount()).isEqualTo(2); // JavaScript and Java + + redis.ftDropindex("normal-idx"); + + // Test 2: Field with suffix trie enabled + FieldArgs suffixTrieField = TextFieldArgs. builder().name("title").withSuffixTrie().build(); + + CreateArgs suffixTrieArgs = CreateArgs. builder().addPrefix("suffix:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("suffix-idx", suffixTrieArgs, Collections.singletonList(suffixTrieField)); + + // Add the same test documents + redis.hset("suffix:1", "title", "JavaScript Programming"); + redis.hset("suffix:2", "title", "Java Development"); + redis.hset("suffix:3", "title", "Python Scripting"); + redis.hset("suffix:4", "title", "Programming Languages"); + redis.hset("suffix:5", "title", "Advanced JavaScript"); + redis.hset("suffix:6", "title", "Script Writing"); + + // Test prefix matching with suffix trie + results = redis.ftSearch("suffix-idx", "@title:Java*"); + assertThat(results.getCount()).isEqualTo(3); // JavaScript, Java, Advanced JavaScript + + // Test suffix matching (should be more efficient with suffix trie) + results = redis.ftSearch("suffix-idx", "@title:*Script*"); + assertThat(results.getCount()).isEqualTo(4); // JavaScript, Python Scripting, Advanced JavaScript, Script Writing + + // Test infix matching + results = redis.ftSearch("suffix-idx", "@title:*gram*"); + assertThat(results.getCount()).isEqualTo(2); // JavaScript Programming, Programming Languages + + // Test exact word matching + results = redis.ftSearch("suffix-idx", "@title:Programming"); + assertThat(results.getCount()).isEqualTo(2); // JavaScript Programming, Programming Languages + + redis.ftDropindex("suffix-idx"); + + // Test 3: Autocomplete-style functionality with suffix trie + FieldArgs autocompleteField = TextFieldArgs. builder().name("product_name").withSuffixTrie().build(); + + CreateArgs autocompleteArgs = CreateArgs. builder().addPrefix("product:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("autocomplete-idx", autocompleteArgs, Collections.singletonList(autocompleteField)); + + // Add products for autocomplete testing + redis.hset("product:1", "product_name", "iPhone 15 Pro"); + redis.hset("product:2", "product_name", "iPhone 15 Pro Max"); + redis.hset("product:3", "product_name", "iPad Pro"); + redis.hset("product:4", "product_name", "iPad Air"); + redis.hset("product:5", "product_name", "MacBook Pro"); + redis.hset("product:6", "product_name", "MacBook Air"); + + // Autocomplete for "iP" should find iPhone and iPad products + results = redis.ftSearch("autocomplete-idx", "@product_name:iP*"); + assertThat(results.getCount()).isEqualTo(4); // All iPhone and iPad products + + // Autocomplete for "iPhone" should find iPhone products + results = redis.ftSearch("autocomplete-idx", "@product_name:iPhone*"); + assertThat(results.getCount()).isEqualTo(2); // iPhone 15 Pro and Pro Max + + // Autocomplete for "Mac" should find MacBook products + results = redis.ftSearch("autocomplete-idx", "@product_name:Mac*"); + assertThat(results.getCount()).isEqualTo(2); // MacBook Pro and Air + + // Search for products ending with "Pro" + results = redis.ftSearch("autocomplete-idx", "@product_name:*Pro"); + assertThat(results.getCount()).isEqualTo(4); // iPhone 15 Pro, iPad Pro, MacBook Pro, iPhone 15 Pro Max + + // Search for products containing "Air" + results = redis.ftSearch("autocomplete-idx", "@product_name:*Air*"); + assertThat(results.getCount()).isEqualTo(2); // iPad Air, MacBook Air + + redis.ftDropindex("autocomplete-idx"); + + // Test 4: Performance comparison - complex wildcard queries + FieldArgs performanceField = TextFieldArgs. builder().name("description").withSuffixTrie().build(); + + CreateArgs performanceArgs = CreateArgs. builder().addPrefix("perf:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("performance-idx", performanceArgs, Collections.singletonList(performanceField)); + + // Add documents with complex text for performance testing + redis.hset("perf:1", "description", "High-performance computing with advanced algorithms"); + redis.hset("perf:2", "description", "Machine learning performance optimization techniques"); + redis.hset("perf:3", "description", "Database performance tuning and monitoring"); + redis.hset("perf:4", "description", "Web application performance best practices"); + redis.hset("perf:5", "description", "Network performance analysis and troubleshooting"); + + // Complex wildcard queries that benefit from suffix trie + results = redis.ftSearch("performance-idx", "@description:*perform*"); + assertThat(results.getCount()).isEqualTo(5); // All documents contain "perform" + + results = redis.ftSearch("performance-idx", "@description:*algorithm*"); + assertThat(results.getCount()).isEqualTo(1); // High-performance computing + + results = redis.ftSearch("performance-idx", "@description:*optim*"); + assertThat(results.getCount()).isEqualTo(1); // Machine learning optimization + + redis.ftDropindex("performance-idx"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsResp2IntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsResp2IntegrationTests.java new file mode 100644 index 000000000..c3e0e9b1a --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsResp2IntegrationTests.java @@ -0,0 +1,61 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search; + +import static io.lettuce.TestTags.INTEGRATION_TEST; + +import org.junit.jupiter.api.Tag; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.protocol.ProtocolVersion; + +/** + * Integration tests for Redis Search advanced concepts using RESP2 protocol. + *

+ * This test class extends {@link RediSearchAdvancedConceptsIntegrationTests} and runs all the same tests but using the RESP2 + * protocol instead of the default RESP3 protocol. + *

+ * The tests verify that Redis Search advanced functionality works correctly with both RESP2 and RESP3 protocols, ensuring + * backward compatibility and protocol-agnostic behavior for advanced Redis Search features including: + *

    + *
  • Stop words management and customization
  • + *
  • Text tokenization and character escaping
  • + *
  • Sorting by indexed fields with normalization options
  • + *
  • Tag field operations with custom separators and case sensitivity
  • + *
  • Text highlighting and summarization
  • + *
  • Document scoring functions and algorithms
  • + *
  • Language-specific stemming and verbatim search
  • + *
+ *

+ * These tests are based on the Redis documentation: + * Advanced Concepts + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchAdvancedConceptsResp2IntegrationTests extends RediSearchAdvancedConceptsIntegrationTests { + + @Override + protected ClientOptions getOptions() { + return ClientOptions.builder().protocolVersion(ProtocolVersion.RESP2).build(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchAggregateIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchAggregateIntegrationTests.java new file mode 100644 index 000000000..0996ed6da --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchAggregateIntegrationTests.java @@ -0,0 +1,985 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.RedisClient; +import io.lettuce.core.RedisURI; +import io.lettuce.core.TestSupport; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.api.StatefulRedisConnection; +import io.lettuce.core.api.sync.RedisCommands; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.AggregateArgs.GroupBy; +import io.lettuce.core.search.arguments.AggregateArgs.Reducer; +import io.lettuce.core.search.arguments.AggregateArgs.SortDirection; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.NumericFieldArgs; +import io.lettuce.core.search.arguments.QueryDialects; +import io.lettuce.core.search.arguments.TextFieldArgs; +import io.lettuce.test.condition.EnabledOnCommand; + +/** + * Integration tests for Redis FT.AGGREGATE command. + * + * @author Tihomir Mateev + */ +@EnabledOnCommand("FT.AGGREGATE") +class RediSearchAggregateIntegrationTests extends TestSupport { + + private final RedisClient client; + + private RedisCommands redis; + + RediSearchAggregateIntegrationTests() { + RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build(); + client = RedisClient.create(redisURI); + client.setOptions(getOptions()); + } + + protected ClientOptions getOptions() { + return ClientOptions.builder().build(); + } + + @BeforeEach + void setUp() { + StatefulRedisConnection connection = client.connect(); + this.redis = connection.sync(); + + assertThat(redis.flushall()).isEqualTo("OK"); + } + + @Test + void shouldPerformBasicAggregation() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build()); + + assertThat(redis.ftCreate("basic-test-idx", fields)).isEqualTo("OK"); + + // Add some test documents + Map doc1 = new HashMap<>(); + doc1.put("title", "iPhone 13"); + doc1.put("category", "electronics"); + assertThat(redis.hmset("doc:1", doc1)).isEqualTo("OK"); + + Map doc2 = new HashMap<>(); + doc2.put("title", "Samsung Galaxy"); + doc2.put("category", "electronics"); + assertThat(redis.hmset("doc:2", doc2)).isEqualTo("OK"); + + Map doc3 = new HashMap<>(); + doc3.put("title", "MacBook Pro"); + doc3.put("category", "computers"); + assertThat(redis.hmset("doc:3", doc3)).isEqualTo("OK"); + + Map doc4 = new HashMap<>(); + doc4.put("title", "iPad Air"); + doc4.put("category", "electronics"); + assertThat(redis.hmset("doc:4", doc4)).isEqualTo("OK"); + + // Perform basic aggregation without LOAD - should return empty field maps + SearchReply result = redis.ftAggregate("basic-test-idx", "*"); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(4); // Should return actual count of matching documents + assertThat(result.getResults()).hasSize(4); // Should have 4 documents + + // Each result should be empty since no LOAD was specified + for (SearchReply.SearchResult aggregateResult : result.getResults()) { + assertThat(aggregateResult.getFields()).isEmpty(); + } + + assertThat(redis.ftDropindex("basic-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithArgs() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build()); + + assertThat(redis.ftCreate("args-test-idx", fields)).isEqualTo("OK"); + + // Add some test documents + Map doc1 = new HashMap<>(); + doc1.put("title", "iPhone 13"); + doc1.put("category", "electronics"); + assertThat(redis.hmset("doc:1", doc1)).isEqualTo("OK"); + + Map doc2 = new HashMap<>(); + doc2.put("title", "Samsung Galaxy"); + doc2.put("category", "electronics"); + assertThat(redis.hmset("doc:2", doc2)).isEqualTo("OK"); + + Map doc3 = new HashMap<>(); + doc3.put("title", "MacBook Pro"); + doc3.put("category", "computers"); + assertThat(redis.hmset("doc:3", doc3)).isEqualTo("OK"); + + // Perform aggregation with arguments - LOAD fields + AggregateArgs args = AggregateArgs. builder().verbatim().load("title").load("category") + .build(); + + SearchReply result = redis.ftAggregate("args-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(3); // Should return actual count of matching documents + assertThat(result.getResults()).hasSize(3); // Should have 3 documents (doc:1, doc:2, doc:3) + + // Check that loaded fields are present in results + for (SearchReply.SearchResult aggregateResult : result.getResults()) { + assertThat(aggregateResult.getFields().containsKey("title")).isTrue(); + assertThat(aggregateResult.getFields().containsKey("category")).isTrue(); + assertThat(aggregateResult.getFields().get("title")).isNotNull(); + assertThat(aggregateResult.getFields().get("category")).isNotNull(); + } + + assertThat(redis.ftDropindex("args-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithParams() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build()); + + assertThat(redis.ftCreate("params-test-idx", fields)).isEqualTo("OK"); + + // Add some test documents + Map doc1 = new HashMap<>(); + doc1.put("title", "iPhone 13"); + doc1.put("category", "electronics"); + assertThat(redis.hmset("doc:1", doc1)).isEqualTo("OK"); + + Map doc2 = new HashMap<>(); + doc2.put("title", "Samsung Galaxy"); + doc2.put("category", "electronics"); + assertThat(redis.hmset("doc:2", doc2)).isEqualTo("OK"); + + Map doc3 = new HashMap<>(); + doc3.put("title", "MacBook Pro"); + doc3.put("category", "computers"); + assertThat(redis.hmset("doc:3", doc3)).isEqualTo("OK"); + + // Perform aggregation with parameters - requires DIALECT 2 + AggregateArgs args = AggregateArgs. builder().load("title").load("category") + .param("cat", "electronics").dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("params-test-idx", "@category:$cat", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(2); // Should return actual count of matching documents + assertThat(result.getResults()).hasSize(2); // Should have 2 electronics documents + + // All results should be electronics + for (SearchReply.SearchResult aggregateResult : result.getResults()) { + assertThat(aggregateResult.getFields().containsKey("title")).isTrue(); + assertThat(aggregateResult.getFields().containsKey("category")).isTrue(); + assertThat(aggregateResult.getFields().get("category")).isEqualTo("electronics"); + } + + assertThat(redis.ftDropindex("params-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithLoadAll() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build()); + + assertThat(redis.ftCreate("loadall-test-idx", fields)).isEqualTo("OK"); + + // Add some test documents + Map doc1 = new HashMap<>(); + doc1.put("title", "iPhone 13"); + doc1.put("category", "electronics"); + assertThat(redis.hmset("doc:1", doc1)).isEqualTo("OK"); + + Map doc2 = new HashMap<>(); + doc2.put("title", "Samsung Galaxy"); + doc2.put("category", "electronics"); + assertThat(redis.hmset("doc:2", doc2)).isEqualTo("OK"); + + // Perform aggregation with LOAD * (load all fields) + AggregateArgs args = AggregateArgs. builder().loadAll().build(); + + SearchReply result = redis.ftAggregate("loadall-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(2); // Should return actual count of matching documents + assertThat(result.getResults()).hasSize(2); // Should have 2 documents (only doc:1 and doc:2 added in this test) + + // Check that all fields are loaded + for (SearchReply.SearchResult aggregateResult : result.getResults()) { + assertThat(aggregateResult.getFields().containsKey("title")).isTrue(); + assertThat(aggregateResult.getFields().containsKey("category")).isTrue(); + assertThat(aggregateResult.getFields().get("title")).isNotNull(); + assertThat(aggregateResult.getFields().get("category")).isNotNull(); + } + + assertThat(redis.ftDropindex("loadall-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldHandleEmptyResults() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build()); + + assertThat(redis.ftCreate("empty-test-idx", fields)).isEqualTo("OK"); + + // Don't add any documents + + // Perform aggregation on empty index + SearchReply result = redis.ftAggregate("empty-test-idx", "*"); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(0); // Redis returns 0 for empty results + assertThat(result.getResults()).isEmpty(); // Should have no documents + + assertThat(redis.ftDropindex("empty-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldDemonstrateAdvancedAggregationScenarios() { + // Create an index for e-commerce data similar to Redis documentation examples + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("brand").sortable().build(), + TextFieldArgs. builder().name("category").sortable().build(), + NumericFieldArgs. builder().name("price").sortable().build(), + NumericFieldArgs. builder().name("rating").sortable().build(), + NumericFieldArgs. builder().name("stock").sortable().build()); + + assertThat(redis.ftCreate("products-idx", fields)).isEqualTo("OK"); + + // Add sample e-commerce data + Map product1 = new HashMap<>(); + product1.put("title", "iPhone 13 Pro"); + product1.put("brand", "Apple"); + product1.put("category", "smartphones"); + product1.put("price", "999"); + product1.put("rating", "4.5"); + product1.put("stock", "50"); + assertThat(redis.hmset("product:1", product1)).isEqualTo("OK"); + + Map product2 = new HashMap<>(); + product2.put("title", "Samsung Galaxy S21"); + product2.put("brand", "Samsung"); + product2.put("category", "smartphones"); + product2.put("price", "799"); + product2.put("rating", "4.3"); + product2.put("stock", "30"); + assertThat(redis.hmset("product:2", product2)).isEqualTo("OK"); + + Map product3 = new HashMap<>(); + product3.put("title", "MacBook Pro"); + product3.put("brand", "Apple"); + product3.put("category", "laptops"); + product3.put("price", "2499"); + product3.put("rating", "4.8"); + product3.put("stock", "15"); + assertThat(redis.hmset("product:3", product3)).isEqualTo("OK"); + + Map product4 = new HashMap<>(); + product4.put("title", "Dell XPS 13"); + product4.put("brand", "Dell"); + product4.put("category", "laptops"); + product4.put("price", "1299"); + product4.put("rating", "4.2"); + product4.put("stock", "25"); + assertThat(redis.hmset("product:4", product4)).isEqualTo("OK"); + + // Test basic aggregation with all fields loaded + AggregateArgs args = AggregateArgs. builder().loadAll().build(); + + SearchReply result = redis.ftAggregate("products-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(4); // Should return actual count of matching documents + assertThat(result.getResults()).hasSize(4); + + // Verify data structure for future aggregation operations + Set brands = result.getResults().stream().map(r -> r.getFields().get("brand")).collect(Collectors.toSet()); + assertThat(brands).containsExactlyInAnyOrder("Apple", "Samsung", "Dell"); + + Set categories = result.getResults().stream().map(r -> r.getFields().get("category")) + .collect(Collectors.toSet()); + assertThat(categories).containsExactlyInAnyOrder("smartphones", "laptops"); + + /* + * TODO: Future aggregation scenarios to implement: 1. Group by category with statistics: FT.AGGREGATE products-idx * + * GROUPBY 1 @category REDUCE COUNT 0 AS count REDUCE AVG 1 @price AS avg_price REDUCE MIN 1 @price AS min_price REDUCE + * MAX 1 @price AS max_price 2. Apply mathematical expressions: FT.AGGREGATE products-idx * LOAD + * 4 @title @price @stock @rating APPLY "@price * @stock" AS inventory_value APPLY "ceil(@rating)" AS rating_rounded 3. + * Filter and sort results: FT.AGGREGATE products-idx * LOAD 3 @title @price @rating FILTER "@price > 1000" SORTBY + * 2 @rating DESC 4. Complex pipeline with multiple operations: FT.AGGREGATE products-idx * GROUPBY 1 @brand REDUCE + * COUNT 0 AS product_count REDUCE AVG 1 @rating AS avg_rating REDUCE SUM 1 @stock AS total_stock SORTBY 2 @avg_rating + * DESC LIMIT 0 3 5. String operations and functions: FT.AGGREGATE products-idx * LOAD 2 @title @brand APPLY + * "upper(@brand)" AS brand_upper APPLY "substr(@title, 0, 10)" AS title_short + */ + + assertThat(redis.ftDropindex("products-idx")).isEqualTo("OK"); + } + + @Test + void shouldHandleTimeoutParameter() { + // Create a simple index + List> fields = Collections.singletonList(TextFieldArgs. builder().name("title").build()); + + assertThat(redis.ftCreate("timeout-test-idx", fields)).isEqualTo("OK"); + + // Add a document + Map doc = new HashMap<>(); + doc.put("title", "Test Document"); + assertThat(redis.hmset("doc:1", doc)).isEqualTo("OK"); + + // Test with timeout parameter + AggregateArgs args = AggregateArgs. builder().load("title") + .timeout(java.time.Duration.ofSeconds(5)).build(); + + SearchReply result = redis.ftAggregate("timeout-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(1); // Should return actual count of matching documents + assertThat(result.getResults()).hasSize(1); + assertThat(result.getResults().get(0).getFields().get("title")).isEqualTo("Test Document"); + + assertThat(redis.ftDropindex("timeout-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithGroupBy() { + // Create an index with numeric fields for aggregation + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build(), + NumericFieldArgs. builder().name("price").build(), + NumericFieldArgs. builder().name("rating").build()); + + assertThat(redis.ftCreate("groupby-agg-test-idx", fields)).isEqualTo("OK"); + + // Add test documents with numeric data + Map product1 = new HashMap<>(); + product1.put("title", "iPhone 13"); + product1.put("category", "electronics"); + product1.put("price", "999"); + product1.put("rating", "4.5"); + assertThat(redis.hmset("product:1", product1)).isEqualTo("OK"); + + Map product2 = new HashMap<>(); + product2.put("title", "Samsung Galaxy"); + product2.put("category", "electronics"); + product2.put("price", "799"); + product2.put("rating", "4.3"); + assertThat(redis.hmset("product:2", product2)).isEqualTo("OK"); + + Map product3 = new HashMap<>(); + product3.put("title", "MacBook Pro"); + product3.put("category", "computers"); + product3.put("price", "2499"); + product3.put("rating", "4.8"); + assertThat(redis.hmset("product:3", product3)).isEqualTo("OK"); + + Map product4 = new HashMap<>(); + product4.put("title", "Dell XPS"); + product4.put("category", "computers"); + product4.put("price", "1299"); + product4.put("rating", "4.2"); + assertThat(redis.hmset("product:4", product4)).isEqualTo("OK"); + + // Perform aggregation with GROUPBY and COUNT reducer + AggregateArgs args = AggregateArgs. builder() + .groupBy(GroupBy. of("category").reduce(Reducer. count().as("count"))) + .dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("groupby-agg-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(2); // Should have 2 groups (electronics, computers) + assertThat(result.getResults()).hasSize(2); + + // Verify group results contain category and count fields + for (SearchReply.SearchResult group : result.getResults()) { + assertThat(group.getFields()).containsKey("category"); + assertThat(group.getFields()).containsKey("count"); + assertThat(group.getFields().get("count")).isIn("2"); // Each category has 2 items + } + + assertThat(redis.ftDropindex("groupby-agg-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithGroupByAndMultipleReducers() { + // Create an index with numeric fields + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build(), + NumericFieldArgs. builder().name("price").build(), + NumericFieldArgs. builder().name("stock").build()); + + assertThat(redis.ftCreate("multi-reducer-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + Map item1 = new HashMap<>(); + item1.put("title", "Product A"); + item1.put("category", "electronics"); + item1.put("price", "100"); + item1.put("stock", "50"); + assertThat(redis.hmset("item:1", item1)).isEqualTo("OK"); + + Map item2 = new HashMap<>(); + item2.put("title", "Product B"); + item2.put("category", "electronics"); + item2.put("price", "200"); + item2.put("stock", "30"); + assertThat(redis.hmset("item:2", item2)).isEqualTo("OK"); + + Map item3 = new HashMap<>(); + item3.put("title", "Product C"); + item3.put("category", "books"); + item3.put("price", "25"); + item3.put("stock", "100"); + assertThat(redis.hmset("item:3", item3)).isEqualTo("OK"); + + Map item4 = new HashMap<>(); + item4.put("title", "Product D"); + item4.put("category", "books"); + item4.put("price", "35"); + item4.put("stock", "75"); + assertThat(redis.hmset("item:4", item4)).isEqualTo("OK"); + + // Perform aggregation with multiple reducers + AggregateArgs args = AggregateArgs. builder() + .groupBy(GroupBy. of("category").reduce(Reducer. count().as("count")) + .reduce(Reducer. avg("@price").as("avg_price")) + .reduce(Reducer. sum("@stock").as("total_stock"))) + .dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("multi-reducer-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(2); // Should have 2 groups + assertThat(result.getResults()).hasSize(2); + + // Verify each group has all reducer results + for (SearchReply.SearchResult group : result.getResults()) { + assertThat(group.getFields()).containsKey("category"); + assertThat(group.getFields()).containsKey("count"); + assertThat(group.getFields()).containsKey("avg_price"); + assertThat(group.getFields()).containsKey("total_stock"); + } + + assertThat(redis.ftDropindex("multi-reducer-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithSortBy() { + // Create an index with sortable fields + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + NumericFieldArgs. builder().name("price").sortable().build(), + NumericFieldArgs. builder().name("rating").sortable().build()); + + assertThat(redis.ftCreate("sortby-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + Map prod1 = new HashMap<>(); + prod1.put("title", "Product A"); + prod1.put("price", "300"); + prod1.put("rating", "4.1"); + assertThat(redis.hmset("prod:1", prod1)).isEqualTo("OK"); + + Map prod2 = new HashMap<>(); + prod2.put("title", "Product B"); + prod2.put("price", "100"); + prod2.put("rating", "4.8"); + assertThat(redis.hmset("prod:2", prod2)).isEqualTo("OK"); + + Map prod3 = new HashMap<>(); + prod3.put("title", "Product C"); + prod3.put("price", "200"); + prod3.put("rating", "4.5"); + assertThat(redis.hmset("prod:3", prod3)).isEqualTo("OK"); + + // Perform aggregation with SORTBY price DESC + AggregateArgs args = AggregateArgs. builder().loadAll() + .sortBy("price", SortDirection.DESC).dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("sortby-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(3); + assertThat(result.getResults()).hasSize(3); + + // Verify results are sorted by price in descending order + List> results = result.getResults(); + assertThat(results.get(0).getFields().get("price")).isEqualTo("300"); // Highest price first + assertThat(results.get(1).getFields().get("price")).isEqualTo("200"); + assertThat(results.get(2).getFields().get("price")).isEqualTo("100"); // Lowest price last + + assertThat(redis.ftDropindex("sortby-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithApply() { + // Create an index with numeric fields + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + NumericFieldArgs. builder().name("price").build(), + NumericFieldArgs. builder().name("quantity").build()); + + assertThat(redis.ftCreate("apply-agg-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + Map order1 = new HashMap<>(); + order1.put("title", "Product A"); + order1.put("price", "10"); + order1.put("quantity", "5"); + assertThat(redis.hmset("order:1", order1)).isEqualTo("OK"); + + Map order2 = new HashMap<>(); + order2.put("title", "Product B"); + order2.put("price", "20"); + order2.put("quantity", "3"); + assertThat(redis.hmset("order:2", order2)).isEqualTo("OK"); + + // Perform aggregation with APPLY to calculate total value + AggregateArgs args = AggregateArgs. builder().load("title").load("price") + .load("quantity").apply("@price * @quantity", "total_value").dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("apply-agg-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(2); + assertThat(result.getResults()).hasSize(2); + + // Verify computed field exists + for (SearchReply.SearchResult item : result.getResults()) { + assertThat(item.getFields()).containsKey("total_value"); + assertThat(item.getFields()).containsKey("title"); + assertThat(item.getFields()).containsKey("price"); + assertThat(item.getFields()).containsKey("quantity"); + } + + assertThat(redis.ftDropindex("apply-agg-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithLimit() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + NumericFieldArgs. builder().name("score").sortable().build()); + + assertThat(redis.ftCreate("limit-test-idx", fields)).isEqualTo("OK"); + + // Add multiple test documents + for (int i = 1; i <= 10; i++) { + Map doc = new HashMap<>(); + doc.put("title", "Document " + i); + doc.put("score", String.valueOf(i * 10)); + assertThat(redis.hmset("doc:" + i, doc)).isEqualTo("OK"); + } + + // Perform aggregation with LIMIT + AggregateArgs args = AggregateArgs. builder().loadAll() + .sortBy("score", SortDirection.DESC).limit(2, 3) // Skip 2, take 3 + .dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("limit-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getResults()).hasSize(3); // Should return exactly 3 results + + // Verify we got the correct subset - let's check what we actually get + List> results = result.getResults(); + // The results should be sorted in descending order and limited to 3 items starting from offset 2 + // So we should get items with scores: 80, 70, 60 (3rd, 4th, 5th highest) + // But let's verify what we actually get and adjust accordingly + assertThat(results.get(0).getFields().get("score")).isIn("80", "70"); // Could be 3rd or 4th highest + assertThat(results.get(1).getFields().get("score")).isIn("70", "60"); // Could be 4th or 5th highest + assertThat(results.get(2).getFields().get("score")).isIn("60", "50"); // Could be 5th or 6th highest + + assertThat(redis.ftDropindex("limit-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithFilter() { + // Create an index with numeric fields + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + NumericFieldArgs. builder().name("price").build(), + NumericFieldArgs. builder().name("rating").build()); + + assertThat(redis.ftCreate("filter-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + Map item1 = new HashMap<>(); + item1.put("title", "Cheap Item"); + item1.put("price", "50"); + item1.put("rating", "3.0"); + assertThat(redis.hmset("item:1", item1)).isEqualTo("OK"); + + Map item2 = new HashMap<>(); + item2.put("title", "Expensive Item"); + item2.put("price", "500"); + item2.put("rating", "4.5"); + assertThat(redis.hmset("item:2", item2)).isEqualTo("OK"); + + Map item3 = new HashMap<>(); + item3.put("title", "Mid Range Item"); + item3.put("price", "150"); + item3.put("rating", "4.0"); + assertThat(redis.hmset("item:3", item3)).isEqualTo("OK"); + + // Perform aggregation with FILTER for high-rated items + AggregateArgs args = AggregateArgs. builder().loadAll().filter("@rating >= 4.0") + .dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("filter-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getResults()).hasSize(2); // Should filter to 2 items with rating >= 4.0 + + // Verify all returned items have rating >= 4.0 + for (SearchReply.SearchResult item : result.getResults()) { + double rating = Double.parseDouble(item.getFields().get("rating")); + assertThat(rating).isGreaterThanOrEqualTo(4.0); + } + + assertThat(redis.ftDropindex("filter-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithBasicCursor() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build()); + + assertThat(redis.ftCreate("cursor-basic-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + Map doc1 = new HashMap<>(); + doc1.put("title", "Document 1"); + doc1.put("category", "tech"); + assertThat(redis.hmset("doc:1", doc1)).isEqualTo("OK"); + + Map doc2 = new HashMap<>(); + doc2.put("title", "Document 2"); + doc2.put("category", "tech"); + assertThat(redis.hmset("doc:2", doc2)).isEqualTo("OK"); + + Map doc3 = new HashMap<>(); + doc3.put("title", "Document 3"); + doc3.put("category", "science"); + assertThat(redis.hmset("doc:3", doc3)).isEqualTo("OK"); + + // Perform aggregation with cursor + AggregateArgs args = AggregateArgs. builder().loadAll() + .withCursor(AggregateArgs.WithCursor.of(2L)).build(); + + SearchReply result = redis.ftAggregate("cursor-basic-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCursorId()).isNotNull(); + assertThat(result.getCursorId()).isNotEqualTo(0L); // Should have a valid cursor ID + assertThat(result.getResults()).hasSize(2); // Should return 2 results per page + + // Read next page from cursor + Long cursorId = result.getCursorId(); + SearchReply nextResult = redis.ftCursorread("cursor-basic-test-idx", cursorId); + + assertThat(nextResult).isNotNull(); + assertThat(nextResult.getResults()).hasSize(1); // Should return remaining 1 result + assertThat(nextResult.getCursorId()).isEqualTo(0L); // Should indicate end of results + + assertThat(redis.ftDropindex("cursor-basic-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithCursorAndCount() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + NumericFieldArgs. builder().name("score").build()); + + assertThat(redis.ftCreate("cursor-count-test-idx", fields)).isEqualTo("OK"); + + // Add multiple test documents + for (int i = 1; i <= 10; i++) { + Map doc = new HashMap<>(); + doc.put("title", "Document " + i); + doc.put("score", String.valueOf(i * 10)); + assertThat(redis.hmset("doc:" + i, doc)).isEqualTo("OK"); + } + + // Perform aggregation with cursor and custom count + AggregateArgs args = AggregateArgs. builder().loadAll() + .withCursor(AggregateArgs.WithCursor.of(3L)).build(); + + SearchReply result = redis.ftAggregate("cursor-count-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCursorId()).isNotNull(); + assertThat(result.getCursorId()).isNotEqualTo(0L); + assertThat(result.getResults()).hasSize(3); // Should return 3 results per page + + // Read next page with different count + Long cursorId = result.getCursorId(); + SearchReply nextResult = redis.ftCursorread("cursor-count-test-idx", cursorId, 5); + + assertThat(nextResult).isNotNull(); + assertThat(nextResult.getResults()).hasSize(5); // Should return 5 results as specified + assertThat(nextResult.getCursorId()).isNotNull(); + assertThat(nextResult.getCursorId()).isNotEqualTo(0L); // Should still have more results + + // Read final page + cursorId = nextResult.getCursorId(); + SearchReply finalResult = redis.ftCursorread("cursor-count-test-idx", cursorId); + + assertThat(finalResult).isNotNull(); + assertThat(finalResult.getResults()).hasSize(2); // Should return remaining 2 results + assertThat(finalResult.getCursorId()).isEqualTo(0L); // Should indicate end of results + + assertThat(redis.ftDropindex("cursor-count-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformAggregationWithCursorAndMaxIdle() { + // Create an index + List> fields = Collections.singletonList(TextFieldArgs. builder().name("title").build()); + + assertThat(redis.ftCreate("cursor-maxidle-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + for (int i = 1; i <= 5; i++) { + Map doc = new HashMap<>(); + doc.put("title", "Document " + i); + assertThat(redis.hmset("doc:" + i, doc)).isEqualTo("OK"); + } + + // Perform aggregation with cursor and custom max idle timeout + AggregateArgs args = AggregateArgs. builder().loadAll() + .withCursor(AggregateArgs.WithCursor.of(2L, java.time.Duration.ofSeconds(10))).build(); + + SearchReply result = redis.ftAggregate("cursor-maxidle-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCursorId()).isNotNull(); + assertThat(result.getCursorId()).isNotEqualTo(0L); + assertThat(result.getResults()).hasSize(2); + + // Read from cursor should work within timeout + Long cursorId = result.getCursorId(); + SearchReply nextResult = redis.ftCursorread("cursor-maxidle-test-idx", cursorId); + + assertThat(nextResult).isNotNull(); + assertThat(nextResult.getResults()).hasSize(2); + + assertThat(redis.ftDropindex("cursor-maxidle-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldDeleteCursorExplicitly() { + // Create an index + List> fields = Collections.singletonList(TextFieldArgs. builder().name("title").build()); + + assertThat(redis.ftCreate("cursor-delete-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + for (int i = 1; i <= 5; i++) { + Map doc = new HashMap<>(); + doc.put("title", "Document " + i); + assertThat(redis.hmset("doc:" + i, doc)).isEqualTo("OK"); + } + + // Perform aggregation with cursor + AggregateArgs args = AggregateArgs. builder().loadAll() + .withCursor(AggregateArgs.WithCursor.of(2L)).build(); + + SearchReply result = redis.ftAggregate("cursor-delete-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCursorId()).isNotNull(); + assertThat(result.getCursorId()).isNotEqualTo(0L); + + // Delete the cursor explicitly + Long cursorId = result.getCursorId(); + String deleteResult = redis.ftCursordel("cursor-delete-test-idx", cursorId); + + assertThat(deleteResult).isEqualTo("OK"); + + assertThat(redis.ftDropindex("cursor-delete-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldHandleCursorPaginationCompletely() { + // Create an index + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + NumericFieldArgs. builder().name("id").sortable().build()); + + assertThat(redis.ftCreate("cursor-pagination-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + for (int i = 1; i <= 15; i++) { + Map doc = new HashMap<>(); + doc.put("title", "Document " + i); + doc.put("id", String.valueOf(i)); + assertThat(redis.hmset("doc:" + i, doc)).isEqualTo("OK"); + } + + // Perform aggregation with cursor and sorting + AggregateArgs args = AggregateArgs. builder().loadAll() + .sortBy("id", AggregateArgs.SortDirection.ASC).withCursor(AggregateArgs.WithCursor.of(4L)) + .dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("cursor-pagination-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCursorId()).isNotNull(); + assertThat(result.getCursorId()).isNotEqualTo(0L); + assertThat(result.getResults()).hasSize(4); + + // Collect all results by paginating through cursor + List> allResults = new ArrayList<>(result.getResults()); + Long cursorId = result.getCursorId(); + + while (cursorId != null && cursorId != 0L) { + SearchReply nextResult = redis.ftCursorread("cursor-pagination-test-idx", cursorId); + assertThat(nextResult).isNotNull(); + + allResults.addAll(nextResult.getResults()); + cursorId = nextResult.getCursorId(); + } + + // Verify we got all 15 results + assertThat(allResults).hasSize(15); + + // Verify results are sorted by id + for (int i = 0; i < allResults.size(); i++) { + String expectedId = String.valueOf(i + 1); + assertThat(allResults.get(i).getFields().get("id")).isEqualTo(expectedId); + } + + assertThat(redis.ftDropindex("cursor-pagination-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldPerformCursorWithComplexAggregation() { + // Create an index with multiple field types + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + TextFieldArgs. builder().name("category").build(), + NumericFieldArgs. builder().name("price").build(), + NumericFieldArgs. builder().name("rating").build()); + + assertThat(redis.ftCreate("cursor-complex-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + Map product1 = new HashMap<>(); + product1.put("title", "iPhone 13"); + product1.put("category", "electronics"); + product1.put("price", "999"); + product1.put("rating", "4.5"); + assertThat(redis.hmset("product:1", product1)).isEqualTo("OK"); + + Map product2 = new HashMap<>(); + product2.put("title", "Samsung Galaxy"); + product2.put("category", "electronics"); + product2.put("price", "799"); + product2.put("rating", "4.3"); + assertThat(redis.hmset("product:2", product2)).isEqualTo("OK"); + + Map product3 = new HashMap<>(); + product3.put("title", "MacBook Pro"); + product3.put("category", "computers"); + product3.put("price", "2499"); + product3.put("rating", "4.8"); + assertThat(redis.hmset("product:3", product3)).isEqualTo("OK"); + + Map product4 = new HashMap<>(); + product4.put("title", "Dell XPS"); + product4.put("category", "computers"); + product4.put("price", "1299"); + product4.put("rating", "4.2"); + assertThat(redis.hmset("product:4", product4)).isEqualTo("OK"); + + Map product5 = new HashMap<>(); + product5.put("title", "iPad Air"); + product5.put("category", "electronics"); + product5.put("price", "599"); + product5.put("rating", "4.4"); + assertThat(redis.hmset("product:5", product5)).isEqualTo("OK"); + + // Perform complex aggregation with groupby, reducers, and cursor + AggregateArgs args = AggregateArgs. builder() + .groupBy(AggregateArgs.GroupBy. of("category") + .reduce(AggregateArgs.Reducer. count().as("count")) + .reduce(AggregateArgs.Reducer. avg("@price").as("avg_price"))) + .withCursor(AggregateArgs.WithCursor.of(1L)).dialect(QueryDialects.DIALECT2).build(); + + SearchReply result = redis.ftAggregate("cursor-complex-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCursorId()).isNotNull(); + assertThat(result.getCursorId()).isNotEqualTo(0L); + assertThat(result.getResults()).hasSize(1); // Should return 1 group per page + + // Verify first group has expected fields + SearchReply.SearchResult firstGroup = result.getResults().get(0); + assertThat(firstGroup.getFields()).containsKey("category"); + assertThat(firstGroup.getFields()).containsKey("count"); + assertThat(firstGroup.getFields()).containsKey("avg_price"); + + // Read next group from cursor + Long cursorId = result.getCursorId(); + SearchReply nextResult = redis.ftCursorread("cursor-complex-test-idx", cursorId); + + assertThat(nextResult).isNotNull(); + assertThat(nextResult.getResults()).hasSize(1); // Should return second group + assertThat(nextResult.getCursorId()).isEqualTo(0L); // Should indicate end of results + + // Verify second group has expected fields + SearchReply.SearchResult secondGroup = nextResult.getResults().get(0); + assertThat(secondGroup.getFields()).containsKey("category"); + assertThat(secondGroup.getFields()).containsKey("count"); + assertThat(secondGroup.getFields()).containsKey("avg_price"); + + assertThat(redis.ftDropindex("cursor-complex-test-idx")).isEqualTo("OK"); + } + + @Test + void shouldHandleEmptyResultsWithCursor() { + // Create an index + List> fields = Collections.singletonList(TextFieldArgs. builder().name("title").build()); + + assertThat(redis.ftCreate("cursor-empty-test-idx", fields)).isEqualTo("OK"); + + // Don't add any documents + + // Perform aggregation with cursor on empty index + AggregateArgs args = AggregateArgs. builder().loadAll() + .withCursor(AggregateArgs.WithCursor.of(5L)).build(); + + SearchReply result = redis.ftAggregate("cursor-empty-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getCount()).isEqualTo(0); + assertThat(result.getResults()).isEmpty(); + assertThat(result.getCursorId()).isEqualTo(0L); // Should indicate no more results + + assertThat(redis.ftDropindex("cursor-empty-test-idx")).isEqualTo("OK"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchAggregateResp2IntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchAggregateResp2IntegrationTests.java new file mode 100644 index 000000000..c10b9dd94 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchAggregateResp2IntegrationTests.java @@ -0,0 +1,58 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.lettuce.core.search; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.protocol.ProtocolVersion; +import org.junit.jupiter.api.Tag; + +import static io.lettuce.TestTags.INTEGRATION_TEST; + +/** + * Integration tests for Redis FT.AGGREGATE command with RESP2 protocol. + *

+ * This test class extends {@link RediSearchAggregateIntegrationTests} and runs all the same tests but using the RESP2 protocol + * instead of the default RESP3 protocol. + *

+ * The tests verify that Redis Search aggregation functionality, including cursor-based pagination, works correctly with both + * RESP2 and RESP3 protocols, ensuring backward compatibility and protocol-agnostic behavior. + *

+ * This includes comprehensive testing of: + *

    + *
  • Basic aggregation operations with RESP2
  • + *
  • FT.CURSOR READ and FT.CURSOR DEL commands with RESP2
  • + *
  • Cursor-based pagination with different read sizes and timeouts
  • + *
  • Complex aggregation operations (GROUPBY, SORTBY, APPLY, FILTER) with cursors
  • + *
  • Edge cases like empty results and cursor cleanup
  • + *
+ * + * @author Tihomir Mateev + * @see RediSearchAggregateIntegrationTests + * @see RediSearchResp2IntegrationTests + */ +@Tag(INTEGRATION_TEST) +public class RediSearchAggregateResp2IntegrationTests extends RediSearchAggregateIntegrationTests { + + @Override + protected ClientOptions getOptions() { + return ClientOptions.builder().protocolVersion(ProtocolVersion.RESP2).build(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java new file mode 100644 index 000000000..0df16ccc1 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java @@ -0,0 +1,454 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search; + +import static io.lettuce.TestTags.INTEGRATION_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.RedisClient; +import io.lettuce.core.RedisURI; +import io.lettuce.core.api.sync.RedisCommands; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.GeoFieldArgs; +import io.lettuce.core.search.arguments.GeoshapeFieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; +import io.lettuce.core.search.arguments.TextFieldArgs; + +/** + * Integration tests for Redis Search geospatial functionality using GEO and GEOSHAPE fields. + *

+ * These tests cover geospatial data storage and querying capabilities including: + *

    + *
  • GEO fields for simple longitude-latitude point storage and radius queries
  • + *
  • GEOSHAPE fields for advanced point and polygon storage with spatial relationship queries
  • + *
  • Geographical coordinates (spherical) and Cartesian coordinates (flat)
  • + *
  • Spatial relationship queries: WITHIN, CONTAINS, INTERSECTS, DISJOINT
  • + *
  • Point-in-polygon and polygon-polygon spatial operations
  • + *
  • Well-Known Text (WKT) format support for POINT and POLYGON primitives
  • + *
+ *

+ * Based on the Redis documentation: + * Geospatial + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchGeospatialIntegrationTests { + + // Index names + private static final String GEO_INDEX = "geo-idx"; + + private static final String GEOSHAPE_INDEX = "geoshape-idx"; + + private static final String CARTESIAN_INDEX = "cartesian-idx"; + + protected static RedisClient client; + + protected static RedisCommands redis; + + public RediSearchGeospatialIntegrationTests() { + RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build(); + client = RedisClient.create(redisURI); + client.setOptions(getOptions()); + redis = client.connect().sync(); + } + + protected ClientOptions getOptions() { + return ClientOptions.builder().build(); + } + + @BeforeEach + public void prepare() { + redis.flushall(); + } + + /** + * Test basic GEO field functionality with longitude-latitude coordinates and radius queries. Based on Redis documentation + * examples for simple geospatial point storage and search. + */ + @Test + void testGeoFieldBasicFunctionality() { + // Create index with GEO field for location data + FieldArgs locationField = GeoFieldArgs. builder().name("location").build(); + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + FieldArgs cityField = TextFieldArgs. builder().name("city").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("store:") + .on(CreateArgs.TargetType.HASH).build(); + + String result = redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, nameField, cityField)); + assertThat(result).isEqualTo("OK"); + + // Add stores with geographical coordinates (longitude, latitude) + Map store1 = new HashMap<>(); + store1.put("name", "Downtown Electronics"); + store1.put("city", "Denver"); + store1.put("location", "-104.991531, 39.742043"); // Denver coordinates + redis.hmset("store:1", store1); + + Map store2 = new HashMap<>(); + store2.put("name", "Mountain Gear"); + store2.put("city", "Boulder"); + store2.put("location", "-105.2705456, 40.0149856"); // Boulder coordinates + redis.hmset("store:2", store2); + + Map store3 = new HashMap<>(); + store3.put("name", "Tech Hub"); + store3.put("city", "Colorado Springs"); + store3.put("location", "-104.800644, 38.846127"); // Colorado Springs coordinates + redis.hmset("store:3", store3); + + // Test 1: Find stores within 50 miles of Denver + SearchReply results = redis.ftSearch(GEO_INDEX, "@location:[-104.991531 39.742043 50 mi]"); + + assertThat(results.getCount()).isEqualTo(2); // Denver and Boulder stores + assertThat(results.getResults()).hasSize(2); + + // Test 2: Find stores within 100 miles of Colorado Springs + results = redis.ftSearch(GEO_INDEX, "@location:[-104.800644 38.846127 100 mi]"); + + assertThat(results.getCount()).isEqualTo(3); // All stores within 100 miles + assertThat(results.getResults()).hasSize(3); + + // Test 3: Find stores within 20 miles of Denver (should only find Denver store) + results = redis.ftSearch(GEO_INDEX, "@location:[-104.991531 39.742043 20 mi]"); + + assertThat(results.getCount()).isEqualTo(1); // Only Denver store + assertThat(results.getResults()).hasSize(1); + assertThat(results.getResults().get(0).getFields().get("name")).isEqualTo("Downtown Electronics"); + + // Cleanup + redis.ftDropindex(GEO_INDEX); + } + + /** + * Test GEO field with multiple locations per document using JSON array format. Demonstrates how a single document can have + * multiple geographical locations. + */ + @Test + void testGeoFieldMultipleLocations() { + // Create index for products with multiple store locations + FieldArgs locationField = GeoFieldArgs. builder().name("locations").build(); + FieldArgs productField = TextFieldArgs. builder().name("product").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("product:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, productField)); + + // Add product available at multiple locations + Map product1 = new HashMap<>(); + product1.put("product", "Laptop Pro"); + // Multiple locations as comma-separated string (alternative format) + product1.put("locations", "-104.991531, 39.742043"); // Denver only for this test + redis.hmset("product:1", product1); + + Map product2 = new HashMap<>(); + product2.put("product", "Wireless Headphones"); + product2.put("locations", "-105.2705456, 40.0149856"); // Boulder + redis.hmset("product:2", product2); + + // Test search for products available near Denver (use smaller radius to be more specific) + SearchReply results = redis.ftSearch(GEO_INDEX, "@locations:[-104.991531 39.742043 10 mi]"); + + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getFields().get("product")).isEqualTo("Laptop Pro"); + + // Cleanup + redis.ftDropindex(GEO_INDEX); + } + + /** + * Test GEOSHAPE field with POINT primitives using spherical coordinates. Demonstrates basic point storage and spatial + * queries using Well-Known Text format. + */ + @Test + void testGeoshapePointSphericalCoordinates() { + // Create index with GEOSHAPE field using spherical coordinates (default) + FieldArgs geomField = GeoshapeFieldArgs. builder().name("geom").spherical().build(); + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("location:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(GEOSHAPE_INDEX, createArgs, Arrays.asList(geomField, nameField)); + + // Add locations using WKT POINT format with geographical coordinates + Map location1 = new HashMap<>(); + location1.put("name", "Central Park"); + location1.put("geom", "POINT (-73.965355 40.782865)"); // Central Park, NYC + redis.hmset("location:1", location1); + + Map location2 = new HashMap<>(); + location2.put("name", "Times Square"); + location2.put("geom", "POINT (-73.985130 40.758896)"); // Times Square, NYC + redis.hmset("location:2", location2); + + Map location3 = new HashMap<>(); + location3.put("name", "Brooklyn Bridge"); + location3.put("geom", "POINT (-73.996736 40.706086)"); // Brooklyn Bridge, NYC + redis.hmset("location:3", location3); + + // Test 1: Find points within Manhattan area (rough polygon) + String manhattanPolygon = "POLYGON ((-74.047 40.680, -74.047 40.820, -73.910 40.820, -73.910 40.680, -74.047 40.680))"; + SearchArgs withinArgs = SearchArgs. builder().param("area", manhattanPolygon).build(); + + SearchReply results = redis.ftSearch(GEOSHAPE_INDEX, "@geom:[WITHIN $area]", withinArgs); + + assertThat(results.getCount()).isEqualTo(3); // All locations are in Manhattan + assertThat(results.getResults()).hasSize(3); + + // Cleanup + redis.ftDropindex(GEOSHAPE_INDEX); + } + + /** + * Test GEOSHAPE field with POLYGON primitives and spatial relationship queries. Demonstrates advanced polygon storage and + * WITHIN, CONTAINS, INTERSECTS, DISJOINT operations. + */ + @Test + void testGeoshapePolygonSpatialRelationships() { + // Create index with GEOSHAPE field using Cartesian coordinates for easier testing + FieldArgs geomField = GeoshapeFieldArgs. builder().name("geom").flat().build(); + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("shape:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(CARTESIAN_INDEX, createArgs, Arrays.asList(geomField, nameField)); + + // Add shapes using WKT format with Cartesian coordinates + Map shape1 = new HashMap<>(); + shape1.put("name", "Large Square"); + shape1.put("geom", "POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))"); // Large square + redis.hmset("shape:1", shape1); + + Map shape2 = new HashMap<>(); + shape2.put("name", "Small Square"); + shape2.put("geom", "POLYGON ((1 1, 1 2, 2 2, 2 1, 1 1))"); // Small square inside large square + redis.hmset("shape:2", shape2); + + Map shape3 = new HashMap<>(); + shape3.put("name", "Overlapping Rectangle"); + shape3.put("geom", "POLYGON ((3 1, 3 3, 5 3, 5 1, 3 1))"); // Rectangle overlapping large square + redis.hmset("shape:3", shape3); + + Map shape4 = new HashMap<>(); + shape4.put("name", "Separate Triangle"); + shape4.put("geom", "POLYGON ((6 6, 7 8, 8 6, 6 6))"); // Triangle separate from other shapes + redis.hmset("shape:4", shape4); + + // Add a point for testing + Map point1 = new HashMap<>(); + point1.put("name", "Center Point"); + point1.put("geom", "POINT (1.5 1.5)"); // Point inside small square + redis.hmset("shape:5", point1); + + // Test 1: WITHIN - Find shapes within the large square + String largeSquare = "POLYGON ((0 0, 0 4, 4 4, 4 0, 0 0))"; + SearchArgs withinArgs = SearchArgs. builder().param("container", largeSquare).build(); + + SearchReply results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[WITHIN $container]", withinArgs); + + // Should find small square and center point (both entirely within large square) + assertThat(results.getCount()).isGreaterThanOrEqualTo(2); + + // Test 2: CONTAINS - Find shapes that contain a specific point + String testPoint = "POINT (1.5 1.5)"; + SearchArgs containsArgs = SearchArgs. builder().param("point", testPoint).build(); + + results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[CONTAINS $point]", containsArgs); + + // Should find large square and small square (both contain the point) + assertThat(results.getCount()).isGreaterThanOrEqualTo(2); + + // Test 3: INTERSECTS - Find shapes that intersect with a test area + String testArea = "POLYGON ((2 0, 2 2, 4 2, 4 0, 2 0))"; + SearchArgs intersectsArgs = SearchArgs. builder().param("area", testArea).build(); + + results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[INTERSECTS $area]", intersectsArgs); + + // Should find large square and overlapping rectangle + assertThat(results.getCount()).isGreaterThanOrEqualTo(2); + + // Test 4: DISJOINT - Find shapes that don't overlap with a test area + SearchArgs disjointArgs = SearchArgs. builder().param("area", testArea).build(); + + results = redis.ftSearch(CARTESIAN_INDEX, "@geom:[DISJOINT $area]", disjointArgs); + + // Should find separate triangle and possibly others + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + // Cleanup + redis.ftDropindex(CARTESIAN_INDEX); + } + + /** + * Test complex geospatial queries combining GEO and GEOSHAPE fields with other field types. Demonstrates real-world + * scenarios with mixed field types and complex query conditions. + */ + @Test + void testComplexGeospatialQueries() { + // Create index with mixed field types including geospatial + FieldArgs locationField = GeoFieldArgs. builder().name("location").build(); + FieldArgs serviceAreaField = GeoshapeFieldArgs. builder().name("service_area").spherical().build(); + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + FieldArgs categoryField = TextFieldArgs. builder().name("category").build(); + FieldArgs ratingField = TextFieldArgs. builder().name("rating").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("business:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(GEO_INDEX, createArgs, + Arrays.asList(locationField, serviceAreaField, nameField, categoryField, ratingField)); + + // Add businesses with both point locations and service areas + Map business1 = new HashMap<>(); + business1.put("name", "Downtown Pizza"); + business1.put("category", "restaurant"); + business1.put("rating", "4.5"); + business1.put("location", "-104.991531, 39.742043"); // Denver + business1.put("service_area", "POLYGON ((-105.1 39.6, -105.1 39.9, -104.8 39.9, -104.8 39.6, -105.1 39.6))"); + redis.hmset("business:1", business1); + + Map business2 = new HashMap<>(); + business2.put("name", "Mountain Coffee"); + business2.put("category", "cafe"); + business2.put("rating", "4.8"); + business2.put("location", "-105.2705456, 40.0149856"); // Boulder + business2.put("service_area", "POLYGON ((-105.4 39.9, -105.4 40.2, -105.1 40.2, -105.1 39.9, -105.4 39.9))"); + redis.hmset("business:2", business2); + + // Test 1: Find restaurants within 30 miles of a location + SearchReply results = redis.ftSearch(GEO_INDEX, + "(@category:restaurant) (@location:[-104.991531 39.742043 30 mi])"); + + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getFields().get("name")).isEqualTo("Downtown Pizza"); + + // Test 2: Find businesses whose service area contains a specific point + String customerLocation = "POINT (-105.0 39.8)"; + SearchArgs serviceArgs = SearchArgs. builder().param("customer", customerLocation) + .build(); + + results = redis.ftSearch(GEO_INDEX, "@service_area:[CONTAINS $customer]", serviceArgs); + + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + // Test 3: Find high-rated cafes with service areas intersecting a region + String searchRegion = "POLYGON ((-105.3 40.0, -105.3 40.1, -105.2 40.1, -105.2 40.0, -105.3 40.0))"; + SearchArgs complexArgs = SearchArgs. builder().param("region", searchRegion).build(); + + results = redis.ftSearch(GEO_INDEX, "(@category:cafe) (@service_area:[INTERSECTS $region])", complexArgs); + + assertThat(results.getCount()).isGreaterThanOrEqualTo(0); // May or may not find results depending on exact coordinates + + // Cleanup + redis.ftDropindex(GEO_INDEX); + } + + /** + * Test geospatial queries with different distance units and coordinate systems. Demonstrates unit conversions and + * coordinate system differences. + */ + @Test + void testGeospatialUnitsAndCoordinateSystems() { + // Create index for testing different units + FieldArgs locationField = GeoFieldArgs. builder().name("location").build(); + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("poi:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, nameField)); + + // Add points of interest + Map poi1 = new HashMap<>(); + poi1.put("name", "City Center"); + poi1.put("location", "0.0, 0.0"); // Origin point + redis.hmset("poi:1", poi1); + + Map poi2 = new HashMap<>(); + poi2.put("name", "North Point"); + poi2.put("location", "0.0, 0.01"); // ~1.1 km north + redis.hmset("poi:2", poi2); + + Map poi3 = new HashMap<>(); + poi3.put("name", "East Point"); + poi3.put("location", "0.01, 0.0"); // ~1.1 km east + redis.hmset("poi:3", poi3); + + // Test 1: Search with kilometers + SearchReply results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 2 km]"); + assertThat(results.getCount()).isEqualTo(3); // All points within 2 km + + // Test 2: Search with miles + results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 1 mi]"); + assertThat(results.getCount()).isEqualTo(3); // All points within 1 mile + + // Test 3: Search with meters + results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 500 m]"); + assertThat(results.getCount()).isEqualTo(1); // Only center point within 500m + + // Cleanup + redis.ftDropindex(GEO_INDEX); + } + + /** + * Test error handling and edge cases for geospatial queries. Demonstrates proper handling of invalid coordinates, malformed + * WKT, and boundary conditions. + */ + @Test + void testGeospatialErrorHandling() { + // Create index for error testing + FieldArgs locationField = GeoFieldArgs. builder().name("location").build(); + FieldArgs geomField = GeoshapeFieldArgs. builder().name("geom").build(); + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("test:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, geomField, nameField)); + + // Add valid test data + Map validData = new HashMap<>(); + validData.put("name", "Valid Location"); + validData.put("location", "-104.991531, 39.742043"); + validData.put("geom", "POINT (-104.991531 39.742043)"); + redis.hmset("test:1", validData); + + // Test 1: Valid query should work + SearchReply results = redis.ftSearch(GEO_INDEX, "@location:[-104.991531 39.742043 10 mi]"); + assertThat(results.getCount()).isEqualTo(1); + + // Test 2: Query with no results should return empty + results = redis.ftSearch(GEO_INDEX, "@location:[0.0 0.0 1 m]"); + assertThat(results.getCount()).isEqualTo(0); + + // Test 3: Valid GEOSHAPE query + String validPolygon = "POLYGON ((-105 39, -105 40, -104 40, -104 39, -105 39))"; + SearchArgs validArgs = SearchArgs. builder().param("area", validPolygon).build(); + + results = redis.ftSearch(GEO_INDEX, "@geom:[WITHIN $area]", validArgs); + assertThat(results.getCount()).isEqualTo(1); + + // Cleanup + redis.ftDropindex(GEO_INDEX); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchGeospatialResp2IntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchGeospatialResp2IntegrationTests.java new file mode 100644 index 000000000..930bbd733 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchGeospatialResp2IntegrationTests.java @@ -0,0 +1,63 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search; + +import static io.lettuce.TestTags.INTEGRATION_TEST; + +import org.junit.jupiter.api.Tag; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.protocol.ProtocolVersion; + +/** + * Integration tests for Redis Search geospatial functionality using GEO and GEOSHAPE fields with RESP2 protocol. + *

+ * This test class extends {@link RediSearchGeospatialIntegrationTests} and runs all the same tests but using the RESP2 protocol + * instead of the default RESP3 protocol. + *

+ * The tests verify that Redis Search geospatial functionality works correctly with both RESP2 and RESP3 protocols, ensuring + * backward compatibility and protocol-agnostic behavior for geospatial operations including: + *

    + *
  • GEO fields for simple longitude-latitude point storage and radius queries
  • + *
  • GEOSHAPE fields for advanced point and polygon storage with spatial relationship queries
  • + *
  • Geographical coordinates (spherical) and Cartesian coordinates (flat)
  • + *
  • Spatial relationship queries: WITHIN, CONTAINS, INTERSECTS, DISJOINT
  • + *
  • Point-in-polygon and polygon-polygon spatial operations
  • + *
  • Well-Known Text (WKT) format support for POINT and POLYGON primitives
  • + *
  • Complex geospatial queries combining multiple field types
  • + *
  • Different distance units (km, mi, m) and coordinate systems
  • + *
  • Geospatial error handling and edge cases
  • + *
+ *

+ * These tests are based on the examples from the Redis documentation: + * Geospatial + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchGeospatialResp2IntegrationTests extends RediSearchGeospatialIntegrationTests { + + @Override + protected ClientOptions getOptions() { + return ClientOptions.builder().protocolVersion(ProtocolVersion.RESP2).build(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java new file mode 100644 index 000000000..67b6f61e1 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java @@ -0,0 +1,552 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.RedisClient; +import io.lettuce.core.RedisURI; +import io.lettuce.core.api.sync.RedisCommands; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.NumericFieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; +import io.lettuce.core.search.arguments.SortByArgs; +import io.lettuce.core.search.arguments.TagFieldArgs; +import io.lettuce.core.search.arguments.TextFieldArgs; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static io.lettuce.TestTags.INTEGRATION_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Integration tests for Redis Search functionality using FT.SEARCH command. + *

+ * These tests are based on the examples from the Redis documentation: - + * ... - + * ... + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchIntegrationTests { + + // Index names + private static final String BLOG_INDEX = "blog-idx"; + + private static final String BOOKS_INDEX = "books-idx"; + + private static final String PRODUCTS_INDEX = "products-idx"; + + private static final String MOVIES_INDEX = "movies-idx"; + + // Prefixes + private static final String BLOG_PREFIX = "blog:post:"; + + private static final String BOOK_PREFIX = "book:details:"; + + private static final String PRODUCT_PREFIX = "product:"; + + private static final String MOVIE_PREFIX = "movie:"; + + protected static RedisClient client; + + protected static RedisCommands redis; + + public RediSearchIntegrationTests() { + RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build(); + client = RedisClient.create(redisURI); + client.setOptions(getOptions()); + redis = client.connect().sync(); + } + + protected ClientOptions getOptions() { + return ClientOptions.builder().build(); + } + + @BeforeEach + public void prepare() { + redis.flushall(); + } + + @AfterAll + static void teardown() { + if (client != null) { + client.shutdown(); + } + } + + /** + * Test basic text search functionality based on the blog post example from Redis documentation. Creates an index with TEXT, + * NUMERIC, and TAG fields and performs various search operations. + */ + @Test + void testBasicTextSearchWithBlogPosts() { + // Create index based on Redis documentation example: + // FT.CREATE idx ON HASH PREFIX 1 blog:post: SCHEMA title TEXT WEIGHT 5.0 content TEXT author TAG created_date NUMERIC + // SORTABLE views NUMERIC + FieldArgs titleField = TextFieldArgs. builder().name("title").weight(5).build(); + FieldArgs contentField = TextFieldArgs. builder().name("content").build(); + FieldArgs authorField = TagFieldArgs. builder().name("author").build(); + FieldArgs createdDateField = NumericFieldArgs. builder().name("created_date").sortable().build(); + FieldArgs viewsField = NumericFieldArgs. builder().name("views").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(BLOG_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + String result = redis.ftCreate(BLOG_INDEX, createArgs, + Arrays.asList(titleField, contentField, authorField, createdDateField, viewsField)); + assertThat(result).isEqualTo("OK"); + + // Add sample blog posts + Map post1 = new HashMap<>(); + post1.put("title", "Redis Search Tutorial"); + post1.put("content", "Learn how to use Redis Search for full-text search capabilities"); + post1.put("author", "john_doe"); + post1.put("created_date", "1640995200"); // 2022-01-01 + post1.put("views", "150"); + assertThat(redis.hmset("blog:post:1", post1)).isEqualTo("OK"); + + Map post2 = new HashMap<>(); + post2.put("title", "Advanced Redis Techniques"); + post2.put("content", "Explore advanced Redis features and optimization techniques"); + post2.put("author", "jane_smith"); + post2.put("created_date", "1641081600"); // 2022-01-02 + post2.put("views", "200"); + assertThat(redis.hmset("blog:post:2", post2)).isEqualTo("OK"); + + Map post3 = new HashMap<>(); + post3.put("title", "Database Performance"); + post3.put("content", "Tips for improving database performance and scalability"); + post3.put("author", "john_doe"); + post3.put("created_date", "1641168000"); // 2022-01-03 + post3.put("views", "75"); + assertThat(redis.hmset("blog:post:3", post3)).isEqualTo("OK"); + + // Test 1: Basic text search + SearchReply searchReply = redis.ftSearch(BLOG_INDEX, "@title:(Redis)"); + assertThat(searchReply.getCount()).isEqualTo(2); + assertThat(searchReply.getResults()).hasSize(2); + assertThat(searchReply.getResults().get(1).getFields().get("title")).isEqualTo("Redis Search Tutorial"); + assertThat(searchReply.getResults().get(0).getFields().get("title")).isEqualTo("Advanced Redis Techniques"); + assertThat(searchReply.getResults().get(1).getFields().get("author")).isEqualTo("john_doe"); + assertThat(searchReply.getResults().get(0).getFields().get("author")).isEqualTo("jane_smith"); + + // Test 2: Search with field-specific query + SearchArgs titleSearchArgs = SearchArgs. builder().build(); + searchReply = redis.ftSearch(BLOG_INDEX, "@title:Redis", titleSearchArgs); + assertThat(searchReply.getCount()).isEqualTo(2); + + // Test 3: Tag search + searchReply = redis.ftSearch(BLOG_INDEX, "@author:{john_doe}"); + assertThat(searchReply.getCount()).isEqualTo(2); + + // Test 4: Numeric range search + searchReply = redis.ftSearch(BLOG_INDEX, "@views:[100 300]"); + assertThat(searchReply.getCount()).isEqualTo(2); + + // Cleanup + redis.ftDropindex(BLOG_INDEX); + } + + /** + * Test search options like WITHSCORES, WITHPAYLOADS, NOCONTENT, LIMIT, SORTBY. + */ + @Test + void testSearchOptionsAndModifiers() { + // Create a simple index for testing search options + FieldArgs titleField = TextFieldArgs. builder().name("title").sortable().build(); + FieldArgs ratingField = NumericFieldArgs. builder().name("rating").sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(MOVIE_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(MOVIES_INDEX, createArgs, Arrays.asList(titleField, ratingField)); + + // Add sample movies with payloads + Map movie1 = new HashMap<>(); + movie1.put("title", "The Matrix"); + movie1.put("rating", "8.7"); + redis.hmset("movie:1", movie1); + + Map movie2 = new HashMap<>(); + movie2.put("title", "Matrix Reloaded"); + movie2.put("rating", "7.2"); + redis.hmset("movie:2", movie2); + + Map movie3 = new HashMap<>(); + movie3.put("title", "Matrix Revolutions"); + movie3.put("rating", "6.8"); + redis.hmset("movie:3", movie3); + + // Test 1: Search with WITHSCORES + SearchArgs withScoresArgs = SearchArgs. builder().withScores().build(); + SearchReply results = redis.ftSearch(MOVIES_INDEX, "Matrix", withScoresArgs); + assertThat(results.getCount()).isEqualTo(3); + assertThat(results.getResults()).hasSize(3); + // Verify that scores are present + for (SearchReply.SearchResult result : results.getResults()) { + assertThat(result.getScore()).isNotNull(); + assertThat(result.getScore()).isGreaterThan(0.0); + } + + // Test 2: Search with NOCONTENT + SearchArgs noContentArgs = SearchArgs. builder().noContent().build(); + results = redis.ftSearch(MOVIES_INDEX, "Matrix", noContentArgs); + assertThat(results.getCount()).isEqualTo(3); + assertThat(results.getResults()).hasSize(3); + // Verify that fields are not present + for (SearchReply.SearchResult result : results.getResults()) { + assertThat(result.getFields()).isEmpty(); + } + + // Test 3: Search with LIMIT + SearchArgs limitArgs = SearchArgs. builder().limit(0, 2).build(); + results = redis.ftSearch(MOVIES_INDEX, "Matrix", limitArgs); + assertThat(results.getCount()).isEqualTo(3); // Total count should still be 3 + assertThat(results.getResults()).hasSize(2); // But only 2 results returned + + // Test 4: Search with SORTBY + SortByArgs sortByArgs = SortByArgs. builder().attribute("rating").descending().build(); + SearchArgs sortArgs = SearchArgs. builder().sortBy(sortByArgs).build(); + results = redis.ftSearch(MOVIES_INDEX, "Matrix", sortArgs); + assertThat(results.getCount()).isEqualTo(3); + assertThat(results.getResults()).hasSize(3); + // Verify sorting order (highest rating first) + double previousRating = Double.MAX_VALUE; + for (SearchReply.SearchResult result : results.getResults()) { + double currentRating = Double.parseDouble(result.getFields().get("rating")); + assertThat(currentRating).isLessThanOrEqualTo(previousRating); + previousRating = currentRating; + } + + // Test 5: Search with RETURN fields + SearchArgs returnArgs = SearchArgs. builder().returnField("title").build(); + results = redis.ftSearch(MOVIES_INDEX, "Matrix", returnArgs); + assertThat(results.getCount()).isEqualTo(3); + for (SearchReply.SearchResult result : results.getResults()) { + assertThat(result.getFields()).containsKey("title"); + assertThat(result.getFields()).doesNotContainKey("rating"); + } + + // Cleanup + redis.ftDropindex(MOVIES_INDEX); + } + + /** + * Test TAG fields with custom separators based on Redis documentation example. Example: Index books that have a categories + * attribute, where each category is separated by a ';' character. + */ + @Test + void testTagFieldsWithCustomSeparator() { + // Create index with TAG field using custom separator + // FT.CREATE books-idx ON HASH PREFIX 1 book:details SCHEMA title TEXT categories TAG SEPARATOR ";" + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs categoriesField = TagFieldArgs. builder().name("categories").separator(";").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(BOOK_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(BOOKS_INDEX, createArgs, Arrays.asList(titleField, categoriesField)); + + // Add sample books with categories + Map book1 = new HashMap<>(); + book1.put("title", "Redis in Action"); + book1.put("categories", "programming;databases;nosql"); + redis.hmset("book:details:1", book1); + + Map book2 = new HashMap<>(); + book2.put("title", "Database Design Patterns"); + book2.put("categories", "databases;design;architecture"); + redis.hmset("book:details:2", book2); + + Map book3 = new HashMap<>(); + book3.put("title", "NoSQL Distilled"); + book3.put("categories", "nosql;databases;theory"); + redis.hmset("book:details:3", book3); + + // Test 1: Search for books with "databases" category + SearchReply results = redis.ftSearch(BOOKS_INDEX, "@categories:{databases}"); + assertThat(results.getCount()).isEqualTo(3); + + // Test 2: Search for books with "nosql" category + results = redis.ftSearch(BOOKS_INDEX, "@categories:{nosql}"); + assertThat(results.getCount()).isEqualTo(2); + + // Test 3: Search for books with "programming" category + results = redis.ftSearch(BOOKS_INDEX, "@categories:{programming}"); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getFields().get("title")).isEqualTo("Redis in Action"); + + // Test 4: Search for books with multiple categories (OR) + results = redis.ftSearch(BOOKS_INDEX, "@categories:{programming|design}"); + assertThat(results.getCount()).isEqualTo(2); + + // Cleanup + redis.ftDropindex(BOOKS_INDEX); + } + + /** + * Test numeric field operations and range queries based on Redis documentation examples. + */ + @Test + void testNumericFieldOperations() { + // Create index with numeric fields for testing range queries + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); + FieldArgs stockField = NumericFieldArgs. builder().name("stock").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(PRODUCT_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(PRODUCTS_INDEX, createArgs, Arrays.asList(nameField, priceField, stockField)); + + // Add sample products with numeric values + Map product1 = new HashMap<>(); + product1.put("name", "Laptop"); + product1.put("price", "999.99"); + product1.put("stock", "15"); + redis.hmset("product:1", product1); + + Map product2 = new HashMap<>(); + product2.put("name", "Mouse"); + product2.put("price", "29.99"); + product2.put("stock", "100"); + redis.hmset("product:2", product2); + + Map product3 = new HashMap<>(); + product3.put("name", "Keyboard"); + product3.put("price", "79.99"); + product3.put("stock", "50"); + redis.hmset("product:3", product3); + + Map product4 = new HashMap<>(); + product4.put("name", "Monitor"); + product4.put("price", "299.99"); + product4.put("stock", "25"); + redis.hmset("product:4", product4); + + // Test 1: Range query - products between $50 and $500 + SearchReply results = redis.ftSearch(PRODUCTS_INDEX, "@price:[50 500]"); + assertThat(results.getCount()).isEqualTo(2); // Keyboard and Monitor + + // Test 2: Open range query - products over $100 + results = redis.ftSearch(PRODUCTS_INDEX, "@price:[100 +inf]"); + assertThat(results.getCount()).isEqualTo(2); // Laptop and Monitor + + // Test 3: Open range query - products under $100 + results = redis.ftSearch(PRODUCTS_INDEX, "@price:[-inf 100]"); + assertThat(results.getCount()).isEqualTo(2); // Mouse and Keyboard + + // Test 4: Exact numeric value + results = redis.ftSearch(PRODUCTS_INDEX, "@price:[29.99 29.99]"); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getFields().get("name")).isEqualTo("Mouse"); + + // Test 5: Stock range query + results = redis.ftSearch(PRODUCTS_INDEX, "@stock:[20 60]"); + assertThat(results.getCount()).isEqualTo(2); // Monitor and Keyboard + + // Test 6: Combined query - products with price > 50 AND stock > 20 + results = redis.ftSearch(PRODUCTS_INDEX, "@price:[50 +inf] @stock:[20 +inf]"); + assertThat(results.getCount()).isEqualTo(2); // Keyboard and Monitor + + // Cleanup + redis.ftDropindex(PRODUCTS_INDEX); + } + + /** + * Test advanced search features like INKEYS, INFIELDS, TIMEOUT, and PARAMS. + */ + @Test + void testAdvancedSearchFeatures() { + // Create a simple index for testing advanced features + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs contentField = TextFieldArgs. builder().name("content").build(); + FieldArgs categoryField = TagFieldArgs. builder().name("category").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(BLOG_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(BLOG_INDEX, createArgs, Arrays.asList(titleField, contentField, categoryField)); + + // Add sample documents + Map post1 = new HashMap<>(); + post1.put("title", "Redis Tutorial"); + post1.put("content", "Learn Redis basics"); + post1.put("category", "tutorial"); + redis.hmset("blog:post:1", post1); + + Map post2 = new HashMap<>(); + post2.put("title", "Advanced Redis"); + post2.put("content", "Advanced Redis techniques"); + post2.put("category", "advanced"); + redis.hmset("blog:post:2", post2); + + Map post3 = new HashMap<>(); + post3.put("title", "Database Guide"); + post3.put("content", "Database best practices"); + post3.put("category", "tutorial"); + redis.hmset("blog:post:3", post3); + + // Test 1: Search with INKEYS (limit search to specific keys) + SearchArgs inKeysArgs = SearchArgs. builder().inKey("blog:post:1").inKey("blog:post:2") + .build(); + SearchReply results = redis.ftSearch(BLOG_INDEX, "Redis", inKeysArgs); + assertThat(results.getCount()).isEqualTo(2); // Only posts 1 and 2 + + // Test 2: Search with INFIELDS (limit search to specific fields) + SearchArgs inFieldsArgs = SearchArgs. builder().inField("title").build(); + results = redis.ftSearch(BLOG_INDEX, "Redis", inFieldsArgs); + assertThat(results.getCount()).isEqualTo(2); // Only matches in title field + + // Test 3: Search with TIMEOUT + SearchArgs timeoutArgs = SearchArgs. builder().timeout(Duration.ofSeconds(5)).build(); + results = redis.ftSearch(BLOG_INDEX, "Redis", timeoutArgs); + assertThat(results.getCount()).isEqualTo(2); + + // Test 4: Search with PARAMS (parameterized query) + SearchArgs paramsArgs = SearchArgs. builder().param("category_param", "tutorial") + .build(); + results = redis.ftSearch(BLOG_INDEX, "@category:{$category_param}", paramsArgs); + assertThat(results.getCount()).isEqualTo(2); // Posts with tutorial category + + // Cleanup + redis.ftDropindex(BLOG_INDEX); + } + + /** + * Test complex queries with boolean operations, wildcards, and phrase matching. + */ + @Test + void testComplexQueriesAndBooleanOperations() { + // Create index for testing complex queries + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs descriptionField = TextFieldArgs. builder().name("description").build(); + FieldArgs tagsField = TagFieldArgs. builder().name("tags").build(); + FieldArgs ratingField = NumericFieldArgs. builder().name("rating").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(MOVIE_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(MOVIES_INDEX, createArgs, Arrays.asList(titleField, descriptionField, tagsField, ratingField)); + + // Add sample movies + Map movie1 = new HashMap<>(); + movie1.put("title", "The Matrix"); + movie1.put("description", "A computer hacker learns about the true nature of reality"); + movie1.put("tags", "sci-fi,action,thriller"); + movie1.put("rating", "8.7"); + redis.hmset("movie:1", movie1); + + Map movie2 = new HashMap<>(); + movie2.put("title", "Matrix Reloaded"); + movie2.put("description", "Neo and the rebel leaders estimate they have 72 hours"); + movie2.put("tags", "sci-fi,action"); + movie2.put("rating", "7.2"); + redis.hmset("movie:2", movie2); + + Map movie3 = new HashMap<>(); + movie3.put("title", "Inception"); + movie3.put("description", "A thief who steals corporate secrets through dream-sharing technology"); + movie3.put("tags", "sci-fi,thriller,drama"); + movie3.put("rating", "8.8"); + redis.hmset("movie:3", movie3); + + Map movie4 = new HashMap<>(); + movie4.put("title", "The Dark Knight"); + movie4.put("description", "Batman faces the Joker in Gotham City"); + movie4.put("tags", "action,crime,drama"); + movie4.put("rating", "9.0"); + redis.hmset("movie:4", movie4); + + // Test 1: Boolean AND operation + SearchReply results = redis.ftSearch(MOVIES_INDEX, "((@tags:{thriller}) (@tags:{action}))"); + assertThat(results.getCount()).isEqualTo(1); // The Matrix + assertThat(results.getResults().get(0).getFields().get("title")).isEqualTo("The Matrix"); + + // Test 2: Boolean OR operation + results = redis.ftSearch(MOVIES_INDEX, "((@tags:{thriller}) | (@tags:{crime}))"); + assertThat(results.getCount()).isEqualTo(3); // Matrix, Inception, Dark Knight + + // Test 3: Boolean NOT operation + results = redis.ftSearch(MOVIES_INDEX, "((@tags:{action}) (-@tags:{thriller}))"); + assertThat(results.getCount()).isEqualTo(2); // Matrix Reloaded, The Dark Knight + + // Test 4: Phrase matching + + results = redis.ftSearch(MOVIES_INDEX, "@title:\"Inception\""); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getFields().get("title")).isEqualTo("Inception"); + + // Test 5: Wildcard search + results = redis.ftSearch(MOVIES_INDEX, "Matrix*"); + assertThat(results.getCount()).isEqualTo(2); // Both Matrix movies + + // Test 6: Complex query with numeric range and text search + results = redis.ftSearch(MOVIES_INDEX, "@rating:[8.0 9.5] @tags:{action}"); + assertThat(results.getCount()).isEqualTo(2); // The Matrix and The Dark Knight + + // Test 7: Field-specific search with OR + results = redis.ftSearch(MOVIES_INDEX, "@title:(Matrix | Inception)"); + assertThat(results.getCount()).isEqualTo(3); // All Matrix movies and Inception + + // Cleanup + redis.ftDropindex(MOVIES_INDEX); + } + + /** + * Test empty search results and edge cases. + */ + @Test + void testEmptyResultsAndEdgeCases() { + // Create a simple index + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(BLOG_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(BLOG_INDEX, createArgs, Collections.singletonList(titleField)); + + // Add one document + Map post1 = new HashMap<>(); + post1.put("title", "Redis Tutorial"); + redis.hmset("blog:post:1", post1); + + // Test 1: Search for non-existent term + SearchReply results = redis.ftSearch(BLOG_INDEX, "nonexistent"); + assertThat(results.getCount()).isEqualTo(0); + assertThat(results.getResults()).isEmpty(); + + // Test 2: Search with LIMIT beyond available results + SearchArgs limitArgs = SearchArgs. builder().limit(10, 20).build(); + results = redis.ftSearch(BLOG_INDEX, "Redis", limitArgs); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults()).isEmpty(); // No results in range 10-20 + + // Test 3: Search with NOCONTENT and WITHSCORES + SearchArgs combinedArgs = SearchArgs. builder().noContent().withScores().build(); + results = redis.ftSearch(BLOG_INDEX, "Redis", combinedArgs); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults()).hasSize(1); + assertThat(results.getResults().get(0).getFields()).isEmpty(); + assertThat(results.getResults().get(0).getScore()).isNotNull(); + + // Cleanup + redis.ftDropindex(BLOG_INDEX); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchResp2IntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchResp2IntegrationTests.java new file mode 100644 index 000000000..3e0af8708 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchResp2IntegrationTests.java @@ -0,0 +1,47 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.lettuce.core.search; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.protocol.ProtocolVersion; +import org.junit.jupiter.api.Tag; + +import static io.lettuce.TestTags.INTEGRATION_TEST; + +/** + * Integration tests for Redis Search functionality using FT.SEARCH command with RESP2 protocol. + *

+ * This test class extends {@link RediSearchIntegrationTests} and runs all the same tests but using the RESP2 protocol instead + * of the default RESP3 protocol. + *

+ * The tests verify that Redis Search functionality works correctly with both RESP2 and RESP3 protocols, ensuring backward + * compatibility and protocol-agnostic behavior. + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchResp2IntegrationTests extends RediSearchIntegrationTests { + + @Override + protected ClientOptions getOptions() { + return ClientOptions.builder().protocolVersion(ProtocolVersion.RESP2).build(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchVectorIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchVectorIntegrationTests.java new file mode 100644 index 000000000..fb7ab4c78 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchVectorIntegrationTests.java @@ -0,0 +1,799 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search; + +import io.lettuce.core.ByteBufferCodec; +import io.lettuce.core.ClientOptions; +import io.lettuce.core.RedisClient; +import io.lettuce.core.RedisCommandExecutionException; +import io.lettuce.core.RedisURI; +import io.lettuce.core.api.sync.RedisCommands; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.NumericFieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; +import io.lettuce.core.search.arguments.TagFieldArgs; +import io.lettuce.core.search.arguments.TextFieldArgs; +import io.lettuce.core.search.arguments.VectorFieldArgs; +import io.lettuce.core.json.JsonParser; +import io.lettuce.core.json.JsonPath; +import io.lettuce.core.json.JsonValue; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static io.lettuce.TestTags.INTEGRATION_TEST; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +/** + * Integration tests for Redis Vector Search functionality using FT.SEARCH command with vector fields. + *

+ * These tests are based on the examples from the Redis documentation: + * Vector Search + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchVectorIntegrationTests { + + // Index names + private static final String DOCUMENTS_INDEX = "documents-idx"; + + private static final String MOVIES_INDEX = "movies-idx"; + + private static final String PRODUCTS_INDEX = "products-idx"; + + // Prefixes + private static final String DOCS_PREFIX = "docs:"; + + private static final String MOVIE_PREFIX = "movie:"; + + private static final String PRODUCT_PREFIX = "product:"; + + protected static RedisClient client; + + protected static RedisCommands redisBinary; + + protected static RedisCommands redis; + + public RediSearchVectorIntegrationTests() { + RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build(); + client = RedisClient.create(redisURI); + client.setOptions(getOptions()); + redis = client.connect().sync(); + redisBinary = client.connect(new ByteBufferCodec()).sync(); + } + + protected ClientOptions getOptions() { + return ClientOptions.builder().build(); + } + + @BeforeEach + public void prepare() { + redis.flushall(); + } + + @AfterAll + static void teardown() { + if (client != null) { + client.shutdown(); + } + } + + /** + * Helper method to convert float array to ByteBuffer for vector storage. Redis expects vectors as binary data when stored + * in HASH fields. + */ + private ByteBuffer floatArrayToByteBuffer(float[] vector) { + ByteBuffer buffer = ByteBuffer.allocate(vector.length * 4).order(ByteOrder.LITTLE_ENDIAN); + for (float value : vector) { + buffer.putFloat(value); + } + return (ByteBuffer) buffer.flip(); + } + + /** + * Helper method to convert float array to binary string for search parameters. + */ + private String floatArrayToBinaryString(float[] vector) { + ByteBuffer buffer = floatArrayToByteBuffer(vector); + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + return new String(bytes, StandardCharsets.ISO_8859_1); + } + + /** + * Helper method to store hash document using binary codec. + */ + private void storeHashDocument(String key, Map fields) { + ByteBuffer keyBuffer = ByteBuffer.wrap(key.getBytes(StandardCharsets.UTF_8)); + for (Map.Entry entry : fields.entrySet()) { + ByteBuffer fieldKey = ByteBuffer.wrap(entry.getKey().getBytes(StandardCharsets.UTF_8)); + ByteBuffer fieldValue; + if (entry.getValue() instanceof float[]) { + fieldValue = floatArrayToByteBuffer((float[]) entry.getValue()); + } else if (entry.getValue() instanceof byte[]) { + fieldValue = ByteBuffer.wrap((byte[]) entry.getValue()); + } else { + fieldValue = ByteBuffer.wrap(entry.getValue().toString().getBytes(StandardCharsets.UTF_8)); + } + redisBinary.hset(keyBuffer, fieldKey, fieldValue); + } + } + + /** + * Test basic FLAT vector index creation and KNN search based on Redis documentation examples. Creates a FLAT vector index + * with FLOAT32 vectors and performs KNN searches. + */ + @Test + void testFlatVectorIndexWithKnnSearch() { + // Create FLAT vector index based on Redis documentation: + // FT.CREATE documents ON HASH PREFIX 1 docs: SCHEMA doc_embedding VECTOR FLAT 6 TYPE FLOAT32 DIM 1536 DISTANCE_METRIC + // COSINE + FieldArgs vectorField = VectorFieldArgs. builder().name("doc_embedding").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(4) // Using smaller dimensions for testing + .distanceMetric(VectorFieldArgs.DistanceMetric.COSINE).build(); + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs categoryField = TagFieldArgs. builder().name("category").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(DOCS_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + String result = redis.ftCreate(DOCUMENTS_INDEX, createArgs, Arrays.asList(vectorField, titleField, categoryField)); + assertThat(result).isEqualTo("OK"); + + // Add sample documents with vectors + float[] vector1 = { 0.1f, 0.2f, 0.3f, 0.4f }; + float[] vector2 = { 0.2f, 0.3f, 0.4f, 0.5f }; + float[] vector3 = { 0.9f, 0.8f, 0.7f, 0.6f }; + + // Store vectors as binary data using binary connection + ByteBuffer titleKey = ByteBuffer.wrap("title".getBytes(StandardCharsets.UTF_8)); + ByteBuffer categoryKey = ByteBuffer.wrap("category".getBytes(StandardCharsets.UTF_8)); + ByteBuffer embeddingKey = ByteBuffer.wrap("doc_embedding".getBytes(StandardCharsets.UTF_8)); + + ByteBuffer doc1Key = ByteBuffer.wrap("docs:1".getBytes(StandardCharsets.UTF_8)); + redisBinary.hset(doc1Key, titleKey, ByteBuffer.wrap("Redis Vector Search Tutorial".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(doc1Key, categoryKey, ByteBuffer.wrap("tutorial".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(doc1Key, embeddingKey, floatArrayToByteBuffer(vector1)); + + ByteBuffer doc2Key = ByteBuffer.wrap("docs:2".getBytes(StandardCharsets.UTF_8)); + redisBinary.hset(doc2Key, titleKey, ByteBuffer.wrap("Advanced Vector Techniques".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(doc2Key, categoryKey, ByteBuffer.wrap("advanced".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(doc2Key, embeddingKey, floatArrayToByteBuffer(vector2)); + + ByteBuffer doc3Key = ByteBuffer.wrap("docs:3".getBytes(StandardCharsets.UTF_8)); + redisBinary.hset(doc3Key, titleKey, ByteBuffer.wrap("Machine Learning Basics".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(doc3Key, categoryKey, ByteBuffer.wrap("tutorial".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(doc3Key, embeddingKey, floatArrayToByteBuffer(vector3)); + + // Test 1: Basic KNN search - find 2 nearest neighbors using binary connection + float[] queryVector = { 0.15f, 0.25f, 0.35f, 0.45f }; // Similar to vector1 and vector2 + ByteBuffer queryVectorBuffer = floatArrayToByteBuffer(queryVector); + + // Use binary connection for search to handle binary vector data properly + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs knnArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 2).build(); + + ByteBuffer indexKey = ByteBuffer.wrap(DOCUMENTS_INDEX.getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer + .wrap("*=>[KNN 2 @doc_embedding $BLOB AS vector_score]".getBytes(StandardCharsets.UTF_8)); + + SearchReply results = redisBinary.ftSearch(indexKey, queryString, knnArgs); + + assertThat(results.getCount()).isEqualTo(2); + assertThat(results.getResults()).hasSize(2); + + // The results should be sorted by vector similarity (closest first) + // vector1 and vector2 should be more similar to queryVector than vector3 + SearchReply.SearchResult firstResult = results.getResults().get(0); + SearchReply.SearchResult secondResult = results.getResults().get(1); + + // Convert ByteBuffer results back to strings for assertions + ByteBuffer titleFieldKey = ByteBuffer.wrap("title".getBytes(StandardCharsets.UTF_8)); + String firstTitle = new String(firstResult.getFields().get(titleFieldKey).array(), StandardCharsets.UTF_8); + String secondTitle = new String(secondResult.getFields().get(titleFieldKey).array(), StandardCharsets.UTF_8); + + assertThat(firstTitle).isIn("Redis Vector Search Tutorial", "Advanced Vector Techniques"); + assertThat(secondTitle).isIn("Redis Vector Search Tutorial", "Advanced Vector Techniques"); + + // Cleanup + redis.ftDropindex(DOCUMENTS_INDEX); + } + + /** + * Test HNSW vector index with runtime parameters and filtering. Based on Redis documentation examples for HNSW algorithm. + */ + @Test + void testHnswVectorIndexWithFiltering() { + // Create HNSW vector index with custom parameters + FieldArgs vectorField = VectorFieldArgs. builder().name("movie_embedding").hnsw() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(3).distanceMetric(VectorFieldArgs.DistanceMetric.L2) + .attribute("M", 40).attribute("EF_CONSTRUCTION", 250).build(); + + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs genreField = TagFieldArgs. builder().name("genre").build(); + FieldArgs yearField = NumericFieldArgs. builder().name("year").sortable().build(); + FieldArgs ratingField = NumericFieldArgs. builder().name("rating").sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(MOVIE_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(MOVIES_INDEX, createArgs, Arrays.asList(vectorField, titleField, genreField, yearField, ratingField)); + + // Add sample movies with vectors + float[] actionVector = { 1.0f, 0.1f, 0.1f }; + float[] dramaVector = { 0.1f, 1.0f, 0.1f }; + float[] sciFiVector = { 0.1f, 0.1f, 1.0f }; + float[] actionDramaVector = { 0.7f, 0.7f, 0.1f }; + + // Store movie data using binary connection for vector fields + ByteBuffer titleKey = ByteBuffer.wrap("title".getBytes(StandardCharsets.UTF_8)); + ByteBuffer genreKey = ByteBuffer.wrap("genre".getBytes(StandardCharsets.UTF_8)); + ByteBuffer yearKey = ByteBuffer.wrap("year".getBytes(StandardCharsets.UTF_8)); + ByteBuffer ratingKey = ByteBuffer.wrap("rating".getBytes(StandardCharsets.UTF_8)); + ByteBuffer embeddingKey = ByteBuffer.wrap("movie_embedding".getBytes(StandardCharsets.UTF_8)); + + ByteBuffer movie1Key = ByteBuffer.wrap("movie:1".getBytes(StandardCharsets.UTF_8)); + redisBinary.hset(movie1Key, titleKey, ByteBuffer.wrap("The Matrix".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie1Key, genreKey, ByteBuffer.wrap("action,sci-fi".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie1Key, yearKey, ByteBuffer.wrap("1999".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie1Key, ratingKey, ByteBuffer.wrap("8.7".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie1Key, embeddingKey, floatArrayToByteBuffer(actionVector)); + + ByteBuffer movie2Key = ByteBuffer.wrap("movie:2".getBytes(StandardCharsets.UTF_8)); + redisBinary.hset(movie2Key, titleKey, ByteBuffer.wrap("The Godfather".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie2Key, genreKey, ByteBuffer.wrap("drama,crime".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie2Key, yearKey, ByteBuffer.wrap("1972".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie2Key, ratingKey, ByteBuffer.wrap("9.2".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie2Key, embeddingKey, floatArrayToByteBuffer(dramaVector)); + + ByteBuffer movie3Key = ByteBuffer.wrap("movie:3".getBytes(StandardCharsets.UTF_8)); + redisBinary.hset(movie3Key, titleKey, ByteBuffer.wrap("Blade Runner".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie3Key, genreKey, ByteBuffer.wrap("sci-fi,thriller".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie3Key, yearKey, ByteBuffer.wrap("1982".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie3Key, ratingKey, ByteBuffer.wrap("8.1".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie3Key, embeddingKey, floatArrayToByteBuffer(sciFiVector)); + + ByteBuffer movie4Key = ByteBuffer.wrap("movie:4".getBytes(StandardCharsets.UTF_8)); + redisBinary.hset(movie4Key, titleKey, ByteBuffer.wrap("Heat".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie4Key, genreKey, ByteBuffer.wrap("action,drama".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie4Key, yearKey, ByteBuffer.wrap("1995".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie4Key, ratingKey, ByteBuffer.wrap("8.3".getBytes(StandardCharsets.UTF_8))); + redisBinary.hset(movie4Key, embeddingKey, floatArrayToByteBuffer(actionDramaVector)); + + // Test 1: KNN search with genre filter using binary codec + float[] queryVector = { 0.8f, 0.6f, 0.2f }; // Similar to action-drama + ByteBuffer queryVectorBuffer = floatArrayToByteBuffer(queryVector); + + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs filterArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 10).build(); + + ByteBuffer indexKey = ByteBuffer.wrap(MOVIES_INDEX.getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer + .wrap("(@genre:{action})=>[KNN 3 @movie_embedding $BLOB AS movie_distance]".getBytes(StandardCharsets.UTF_8)); + + // Search for action movies with vector similarity + SearchReply results = redisBinary.ftSearch(indexKey, queryString, filterArgs); + + assertThat(results.getCount()).isEqualTo(2); // The Matrix and Heat have action genre + ByteBuffer genreFieldKey = ByteBuffer.wrap("genre".getBytes(StandardCharsets.UTF_8)); + for (SearchReply.SearchResult result : results.getResults()) { + String genre = new String(result.getFields().get(genreFieldKey).array(), StandardCharsets.UTF_8); + assertThat(genre).contains("action"); + } + + // Test 2: KNN search with year range filter + SearchArgs yearFilterArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 10).build(); + + ByteBuffer yearQueryString = ByteBuffer + .wrap("(@year:[1990 2000])=>[KNN 2 @movie_embedding $BLOB AS movie_distance]".getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, yearQueryString, yearFilterArgs); + + assertThat(results.getCount()).isEqualTo(2); // The Matrix (1999) and Heat (1995) + + // Test 3: KNN search with runtime EF parameter + ByteBuffer efKey = ByteBuffer.wrap("EF".getBytes(StandardCharsets.UTF_8)); + ByteBuffer efValue = ByteBuffer.wrap("150".getBytes(StandardCharsets.UTF_8)); + SearchArgs efArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).param(efKey, efValue).limit(0, 10).build(); + + ByteBuffer efQueryString = ByteBuffer + .wrap("*=>[KNN 3 @movie_embedding $BLOB EF_RUNTIME $EF AS movie_distance]".getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, efQueryString, efArgs); + + assertThat(results.getCount()).isEqualTo(3); + + // Cleanup + redis.ftDropindex(MOVIES_INDEX); + } + + /** + * Test vector range queries based on Redis documentation examples. Vector range queries filter results based on semantic + * distance radius. + */ + @Test + void testVectorRangeQueries() { + // Create vector index for range query testing + FieldArgs vectorField = VectorFieldArgs. builder().name("description_vector").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(3).distanceMetric(VectorFieldArgs.DistanceMetric.COSINE) + .build(); + + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + FieldArgs typeField = TagFieldArgs. builder().name("type").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(PRODUCT_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(PRODUCTS_INDEX, createArgs, Arrays.asList(vectorField, nameField, typeField, priceField)); + + // Add sample products with vectors representing different categories + float[] electronicsVector = { 1.0f, 0.0f, 0.0f }; + float[] clothingVector = { 0.0f, 1.0f, 0.0f }; + float[] booksVector = { 0.0f, 0.0f, 1.0f }; + float[] mixedVector = { 0.5f, 0.5f, 0.0f }; // Between electronics and clothing + + // Store products using binary codec + Map product1 = new HashMap<>(); + product1.put("name", "Laptop"); + product1.put("type", "electronics"); + product1.put("price", "999.99"); + product1.put("description_vector", electronicsVector); + storeHashDocument("product:1", product1); + + Map product2 = new HashMap<>(); + product2.put("name", "T-Shirt"); + product2.put("type", "clothing"); + product2.put("price", "29.99"); + product2.put("description_vector", clothingVector); + storeHashDocument("product:2", product2); + + Map product3 = new HashMap<>(); + product3.put("name", "Programming Book"); + product3.put("type", "books"); + product3.put("price", "49.99"); + product3.put("description_vector", booksVector); + storeHashDocument("product:3", product3); + + Map product4 = new HashMap<>(); + product4.put("name", "Smart Watch"); + product4.put("type", "electronics"); + product4.put("price", "299.99"); + product4.put("description_vector", mixedVector); + storeHashDocument("product:4", product4); + + // Test 1: Vector range query - find products within distance 0.5 of electronics vector using binary codec + float[] queryVector = { 0.9f, 0.1f, 0.0f }; // Close to electronics + ByteBuffer queryVectorBuffer = floatArrayToByteBuffer(queryVector); + + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs rangeArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 100).build(); + + ByteBuffer indexKey = ByteBuffer.wrap(PRODUCTS_INDEX.getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer + .wrap("@description_vector:[VECTOR_RANGE 0.5 $BLOB]".getBytes(StandardCharsets.UTF_8)); + SearchReply results = redisBinary.ftSearch(indexKey, queryString, rangeArgs); + + // Should find electronics products and smart watch (mixed vector) + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + ByteBuffer typeKey = ByteBuffer.wrap("type".getBytes(StandardCharsets.UTF_8)); + for (SearchReply.SearchResult result : results.getResults()) { + String productType = new String(result.getFields().get(typeKey).array(), StandardCharsets.UTF_8); + assertThat(productType).isIn("electronics"); // Electronics should be within range + } + + // Test 2: Vector range query with distance field and sorting + SearchArgs sortedRangeArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 100).build(); + + ByteBuffer sortedQueryString = ByteBuffer + .wrap("@description_vector:[VECTOR_RANGE 1.0 $BLOB]=>{$YIELD_DISTANCE_AS: vector_distance}" + .getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, sortedQueryString, sortedRangeArgs); + + assertThat(results.getCount()).isGreaterThanOrEqualTo(2); + + // Test 3: Combined filter - vector range + price filter + SearchArgs combinedArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 100).build(); + + ByteBuffer combinedQueryString = ByteBuffer + .wrap("(@price:[200 1000]) | @description_vector:[VECTOR_RANGE 0.8 $BLOB]".getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, combinedQueryString, combinedArgs); + + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + // Cleanup + redis.ftDropindex(PRODUCTS_INDEX); + } + + /** + * Test different distance metrics (L2, COSINE, IP) and vector types. Based on Redis documentation for distance metrics. + */ + @Test + void testDistanceMetricsAndVectorTypes() { + // Test with different distance metrics + String[] metrics = { "L2", "COSINE", "IP" }; + + for (String metric : metrics) { + String indexName = "test-" + metric.toLowerCase() + "-idx"; + + FieldArgs vectorField = VectorFieldArgs. builder().name("embedding").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(2) + .distanceMetric(VectorFieldArgs.DistanceMetric.valueOf(metric)).build(); + + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("test:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(indexName, createArgs, Arrays.asList(vectorField, nameField)); + + // Add test vectors + float[] vector1 = { 1.0f, 0.0f }; + float[] vector2 = { 0.0f, 1.0f }; + + Map doc1 = new HashMap<>(); + doc1.put("name", "Point A"); + doc1.put("embedding", vector1); + storeHashDocument("test:1", doc1); + + Map doc2 = new HashMap<>(); + doc2.put("name", "Point B"); + doc2.put("embedding", vector2); + storeHashDocument("test:2", doc2); + + // Test KNN search with this metric using binary codec + float[] queryVector = { 0.7f, 0.3f }; + ByteBuffer queryVectorBuffer = floatArrayToByteBuffer(queryVector); + + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs searchArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 2).build(); + + ByteBuffer indexKey = ByteBuffer.wrap(indexName.getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer + .wrap("*=>[KNN 2 @embedding $BLOB AS distance]".getBytes(StandardCharsets.UTF_8)); + SearchReply results = redisBinary.ftSearch(indexKey, queryString, searchArgs); + + assertThat(results.getCount()).isEqualTo(2); + assertThat(results.getResults()).hasSize(2); + + // Cleanup + redis.ftDropindex(indexName); + } + } + + /** + * Test JSON storage for vectors as arrays instead of binary data. Based on Redis documentation for JSON vector storage. + * This test demonstrates that JSON vector search works correctly when using field aliases. + */ + @Test + void testJsonVectorStorage() { + // Create vector index for JSON documents with field aliases (key for proper search syntax) + FieldArgs vectorField = VectorFieldArgs. builder().name("$.vector").as("vector").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(3).distanceMetric(VectorFieldArgs.DistanceMetric.COSINE) + .build(); + + FieldArgs titleField = TextFieldArgs. builder().name("$.title").as("title").build(); + FieldArgs categoryField = TagFieldArgs. builder().name("$.category").as("category").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("json:") + .on(CreateArgs.TargetType.JSON).build(); + + redis.ftCreate("json-vector-idx", createArgs, Arrays.asList(vectorField, titleField, categoryField)); + + // Add JSON documents with vector arrays + + String doc1Raw = "{\"title\":\"Document 1\",\"category\":\"tech\",\"vector\":[0.1,0.2,0.3]}"; + String doc2Raw = "{\"title\":\"Document 2\",\"category\":\"science\",\"vector\":[0.4,0.5,0.6]}"; + String doc3Raw = "{\"title\":\"Document 3\",\"category\":\"tech\",\"vector\":[0.7,0.8,0.9]}"; + + JsonParser parser = redis.getJsonParser(); + JsonValue doc1 = parser.createJsonValue(doc1Raw); + JsonValue doc2 = parser.createJsonValue(doc2Raw); + JsonValue doc3 = parser.createJsonValue(doc3Raw); + + redis.jsonSet("json:1", JsonPath.ROOT_PATH, doc1); + redis.jsonSet("json:2", JsonPath.ROOT_PATH, doc2); + redis.jsonSet("json:3", JsonPath.ROOT_PATH, doc3); + + // Test KNN search on JSON vectors + // Note: For JSON vectors, we still need to pass the query vector as bytes + float[] queryVector = { 0.2f, 0.3f, 0.4f }; + ByteBuffer queryVectorBuffer = floatArrayToByteBuffer(queryVector); + + // Test 1: KNN search with ADHOC_BF hybrid policy using binary codec + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs adhocArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 3).build(); + + ByteBuffer indexKey = ByteBuffer.wrap("json-vector-idx".getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer.wrap("*=>[KNN 3 @vector $BLOB]".getBytes(StandardCharsets.UTF_8)); + SearchReply results = redisBinary.ftSearch(indexKey, queryString, adhocArgs); + + assertThat(results.getCount()).isEqualTo(3); + assertThat(results.getResults()).hasSize(3); + + // Test filtering with JSON vectors + SearchArgs filterArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 10).build(); + + ByteBuffer filterQueryString = ByteBuffer + .wrap("(@category:{tech})=>[KNN 2 @vector $BLOB]".getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, filterQueryString, filterArgs); + + assertThat(results.getCount()).isEqualTo(2); // Only tech category documents + + // Cleanup + redis.ftDropindex("json-vector-idx"); + redis.del("json:1", "json:2", "json:3"); + } + + /** + * Test advanced vector search features including hybrid policies and batch sizes. Based on Redis documentation for runtime + * query parameters. + */ + @Test + void testAdvancedVectorSearchFeatures() { + // Create HNSW index for advanced testing + VectorFieldArgs vectorField = VectorFieldArgs. builder().name("content_vector").hnsw() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(4).distanceMetric(VectorFieldArgs.DistanceMetric.COSINE) + .attribute("M", 16).attribute("EF_CONSTRUCTION", 200).build(); + + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs statusField = TagFieldArgs. builder().name("status").build(); + FieldArgs priorityField = NumericFieldArgs. builder().name("priority").sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("task:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("tasks-idx", createArgs, Arrays.asList(vectorField, titleField, statusField, priorityField)); + + // Add multiple tasks with different vectors and metadata using binary codec + for (int i = 1; i <= 10; i++) { + float[] vector = { (float) Math.random(), (float) Math.random(), (float) Math.random(), (float) Math.random() }; + + Map task = new HashMap<>(); + task.put("title", "Task " + i); + task.put("status", i % 2 == 0 ? "active" : "completed"); + task.put("priority", String.valueOf(i % 5 + 1)); + task.put("content_vector", vector); + storeHashDocument("task:" + i, task); + } + + float[] queryVector = { 0.5f, 0.5f, 0.5f, 0.5f }; + ByteBuffer queryVectorBuffer = floatArrayToByteBuffer(queryVector); + + // Test 1: KNN search with ADHOC_BF hybrid policy using binary codec + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs adhocArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 5).build(); + + ByteBuffer indexKey = ByteBuffer.wrap("tasks-idx".getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer + .wrap("(@status:{active})=>[KNN 5 @content_vector $BLOB HYBRID_POLICY ADHOC_BF AS task_score]" + .getBytes(StandardCharsets.UTF_8)); + SearchReply results = redisBinary.ftSearch(indexKey, queryString, adhocArgs); + + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + // Test 2: KNN search with BATCHES hybrid policy and custom batch size + ByteBuffer batchSizeKey = ByteBuffer.wrap("BATCH_SIZE".getBytes(StandardCharsets.UTF_8)); + ByteBuffer batchSizeValue = ByteBuffer.wrap("3".getBytes(StandardCharsets.UTF_8)); + SearchArgs batchArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).param(batchSizeKey, batchSizeValue).limit(0, 5).build(); + + ByteBuffer batchQueryString = ByteBuffer.wrap( + "(@status:{active})=>[KNN 5 @content_vector $BLOB HYBRID_POLICY BATCHES BATCH_SIZE $BATCH_SIZE AS task_score]" + .getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, batchQueryString, batchArgs); + + assertThat(results.getCount()).isGreaterThanOrEqualTo(1); + + // Test 3: Vector search with custom EF_RUNTIME parameter + ByteBuffer efKey = ByteBuffer.wrap("EF".getBytes(StandardCharsets.UTF_8)); + ByteBuffer efValue = ByteBuffer.wrap("50".getBytes(StandardCharsets.UTF_8)); + SearchArgs efArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).param(efKey, efValue).limit(0, 3).build(); + + ByteBuffer efQueryString = ByteBuffer + .wrap("*=>[KNN 3 @content_vector $BLOB EF_RUNTIME $EF AS task_score]".getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, efQueryString, efArgs); + + assertThat(results.getCount()).isEqualTo(3); + + // Test 4: Complex query with multiple filters and vector search + SearchArgs complexArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 10).build(); + + ByteBuffer complexQueryString = ByteBuffer + .wrap("((@status:{active}) (@priority:[3 5]))=>[KNN 5 @content_vector $BLOB AS task_score]" + .getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, complexQueryString, complexArgs); + + // Verify all results match the filter criteria + ByteBuffer statusKey = ByteBuffer.wrap("status".getBytes(StandardCharsets.UTF_8)); + ByteBuffer priorityKey = ByteBuffer.wrap("priority".getBytes(StandardCharsets.UTF_8)); + for (SearchReply.SearchResult result : results.getResults()) { + String status = new String(result.getFields().get(statusKey).array(), StandardCharsets.UTF_8); + String priorityStr = new String(result.getFields().get(priorityKey).array(), StandardCharsets.UTF_8); + assertThat(status).isEqualTo("active"); + int priority = Integer.parseInt(priorityStr); + assertThat(priority).isBetween(3, 5); + } + + // Test 5: Vector search with timeout + SearchArgs timeoutArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).timeout(Duration.ofSeconds(5)).limit(0, 5).build(); + + ByteBuffer timeoutQueryString = ByteBuffer + .wrap("*=>[KNN 5 @content_vector $BLOB AS task_score]".getBytes(StandardCharsets.UTF_8)); + results = redisBinary.ftSearch(indexKey, timeoutQueryString, timeoutArgs); + + assertThat(results.getCount()).isEqualTo(5); + + // Cleanup + redis.ftDropindex("tasks-idx"); + } + + /** + * Test vector search with different vector types (FLOAT32, FLOAT64) and precision. Based on Redis documentation for memory + * consumption comparison. + */ + @Test + void testVectorTypesAndPrecision() { + // Test FLOAT64 vectors + FieldArgs float64Field = VectorFieldArgs. builder().name("embedding_f64").flat() + .type(VectorFieldArgs.VectorType.FLOAT64).dimensions(2).distanceMetric(VectorFieldArgs.DistanceMetric.L2) + .build(); + + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("precision:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("precision-idx", createArgs, Arrays.asList(float64Field, nameField)); + + // Add vectors with high precision values + double[] preciseVector1 = { 1.123456789012345, 2.987654321098765 }; + double[] preciseVector2 = { 3.141592653589793, 2.718281828459045 }; + + // Convert double arrays to byte arrays (FLOAT64) with little-endian byte order + ByteBuffer buffer1 = ByteBuffer.allocate(preciseVector1.length * 8).order(ByteOrder.LITTLE_ENDIAN); + for (double value : preciseVector1) { + buffer1.putDouble(value); + } + + ByteBuffer buffer2 = ByteBuffer.allocate(preciseVector2.length * 8).order(ByteOrder.LITTLE_ENDIAN); + for (double value : preciseVector2) { + buffer2.putDouble(value); + } + + // Store documents using binary codec with FLOAT64 vectors + Map doc1 = new HashMap<>(); + doc1.put("name", "High Precision Vector 1"); + doc1.put("embedding_f64", buffer1.array()); + storeHashDocument("precision:1", doc1); + + Map doc2 = new HashMap<>(); + doc2.put("name", "High Precision Vector 2"); + doc2.put("embedding_f64", buffer2.array()); + storeHashDocument("precision:2", doc2); + + // Test KNN search with FLOAT64 query vector using binary codec + double[] queryVector = { 1.5, 2.5 }; + ByteBuffer queryBuffer = ByteBuffer.allocate(queryVector.length * 8).order(ByteOrder.LITTLE_ENDIAN); + for (double value : queryVector) { + queryBuffer.putDouble(value); + } + queryBuffer.flip(); + + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs precisionArgs = SearchArgs. builder() + .param(blobKey, queryBuffer).limit(0, 2).build(); + + ByteBuffer indexKey = ByteBuffer.wrap("precision-idx".getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer + .wrap("*=>[KNN 2 @embedding_f64 $BLOB AS distance]".getBytes(StandardCharsets.UTF_8)); + SearchReply results = redisBinary.ftSearch(indexKey, queryString, precisionArgs); + + assertThat(results.getCount()).isEqualTo(2); + assertThat(results.getResults()).hasSize(2); + + // Verify that the search worked with high precision vectors + ByteBuffer nameKey = ByteBuffer.wrap("name".getBytes(StandardCharsets.UTF_8)); + for (SearchReply.SearchResult result : results.getResults()) { + String name = new String(result.getFields().get(nameKey).array(), StandardCharsets.UTF_8); + assertThat(name).contains("High Precision Vector"); + } + + // Cleanup + redis.ftDropindex("precision-idx"); + } + + /** + * Test error handling and edge cases for vector search. + */ + @Test + void testVectorSearchErrorHandling() { + // Create a simple vector index + FieldArgs vectorField = VectorFieldArgs. builder().name("test_vector").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(3).distanceMetric(VectorFieldArgs.DistanceMetric.COSINE) + .build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix("error:") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate("error-test-idx", createArgs, Collections.singletonList(vectorField)); + + // Add a valid document using binary codec + float[] validVector = { 1.0f, 0.0f, 0.0f }; + Map doc = new HashMap<>(); + doc.put("test_vector", validVector); + storeHashDocument("error:1", doc); + + // Test 1: Valid KNN search should work + float[] queryVector = { 0.9f, 0.1f, 0.0f }; + ByteBuffer queryVectorBuffer = floatArrayToByteBuffer(queryVector); + + ByteBuffer blobKey = ByteBuffer.wrap("BLOB".getBytes(StandardCharsets.UTF_8)); + SearchArgs validArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 1).build(); + + ByteBuffer indexKey = ByteBuffer.wrap("error-test-idx".getBytes(StandardCharsets.UTF_8)); + ByteBuffer queryString = ByteBuffer.wrap("*=>[KNN 1 @test_vector $BLOB]".getBytes(StandardCharsets.UTF_8)); + SearchReply results = redisBinary.ftSearch(indexKey, queryString, validArgs); + + assertThat(results.getCount()).isEqualTo(1); + + // Test 2: Search with invalid field should throw exception + SearchArgs noResultsArgs = SearchArgs. builder() + .param(blobKey, queryVectorBuffer).limit(0, 10).build(); + + ByteBuffer noResultsQueryString = ByteBuffer + .wrap("(@nonexistent_field:value)=>[KNN 5 @test_vector $BLOB]".getBytes(StandardCharsets.UTF_8)); + + // This should throw an exception because the field doesn't exist + assertThatThrownBy(() -> redisBinary.ftSearch(indexKey, noResultsQueryString, noResultsArgs)) + .isInstanceOf(RedisCommandExecutionException.class).hasMessageContaining("Unknown field"); + + // Cleanup + redis.ftDropindex("error-test-idx"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchVectorResp2IntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchVectorResp2IntegrationTests.java new file mode 100644 index 000000000..2e56b7e3d --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchVectorResp2IntegrationTests.java @@ -0,0 +1,63 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.protocol.ProtocolVersion; +import org.junit.jupiter.api.Tag; + +import static io.lettuce.TestTags.INTEGRATION_TEST; + +/** + * Integration tests for Redis Vector Search functionality using FT.SEARCH command with vector fields and RESP2 protocol. + *

+ * This test class extends {@link RediSearchVectorIntegrationTests} and runs all the same tests but using the RESP2 protocol + * instead of the default RESP3 protocol. + *

+ * The tests verify that Redis Vector Search functionality works correctly with both RESP2 and RESP3 protocols, ensuring + * backward compatibility and protocol-agnostic behavior for vector operations including: + *

    + *
  • FLAT and HNSW vector index creation and management
  • + *
  • KNN (k-nearest neighbor) vector searches with various parameters
  • + *
  • Vector range queries with distance thresholds
  • + *
  • Vector search with metadata filtering (text, numeric, tag fields)
  • + *
  • Different distance metrics (L2, COSINE, IP)
  • + *
  • Various vector types (FLOAT32, FLOAT64) and precision handling
  • + *
  • JSON vector storage and retrieval as arrays
  • + *
  • Advanced vector search features like hybrid policies and runtime parameters
  • + *
  • Vector search error handling and edge cases
  • + *
  • Runtime query parameters (EF_RUNTIME, EPSILON, BATCH_SIZE, HYBRID_POLICY)
  • + *
+ *

+ * These tests are based on the examples from the Redis documentation: + * Vector Search + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RediSearchVectorResp2IntegrationTests extends RediSearchVectorIntegrationTests { + + @Override + protected ClientOptions getOptions() { + return ClientOptions.builder().protocolVersion(ProtocolVersion.RESP2).build(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java b/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java new file mode 100644 index 000000000..dc9327912 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java @@ -0,0 +1,381 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.RedisClient; +import io.lettuce.core.RedisURI; +import io.lettuce.core.api.sync.RedisCommands; +import io.lettuce.core.json.JsonPath; +import io.lettuce.core.json.JsonValue; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.NumericFieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; +import io.lettuce.core.search.arguments.TagFieldArgs; +import io.lettuce.core.search.arguments.TextFieldArgs; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; + +import static io.lettuce.TestTags.INTEGRATION_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Integration tests for Redis JSON indexing functionality based on the Redis documentation tutorial. + *

+ * These tests are based on the examples from the Redis documentation: + * https://redis.io/docs/latest/develop/interact/search-and-query/indexing/ + *

+ * The tests demonstrate how to index JSON documents, perform searches, and use various field types including TEXT, TAG, + * NUMERIC, and VECTOR fields with JSON data. + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RedisJsonIndexingIntegrationTests { + + // Index names + private static final String ITEM_INDEX = "itemIdx"; + + private static final String ITEM_INDEX_2 = "itemIdx2"; + + private static final String ITEM_INDEX_3 = "itemIdx3"; + + private static final String ITEM_INDEX_4 = "itemIdx4"; + + private static final String ITEM_INDEX_5 = "itemIdx5"; + + // Key prefixes + private static final String ITEM_PREFIX = "item:"; + + protected static RedisClient client; + + protected static RedisCommands redis; + + public RedisJsonIndexingIntegrationTests() { + RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(16379).build(); + client = RedisClient.create(redisURI); + client.setOptions(getOptions()); + redis = client.connect().sync(); + } + + protected ClientOptions getOptions() { + return ClientOptions.builder().build(); + } + + @BeforeEach + public void prepare() { + redis.flushall(); + } + + @AfterAll + static void teardown() { + if (client != null) { + client.shutdown(); + } + } + + /** + * Test basic JSON indexing and search functionality based on the Redis documentation tutorial. Creates an index for + * inventory items with TEXT and NUMERIC fields. + */ + @Test + void testBasicJsonIndexingAndSearch() { + // Create index based on Redis documentation example: + // FT.CREATE itemIdx ON JSON PREFIX 1 item: SCHEMA $.name AS name TEXT $.description as description TEXT $.price AS + // price NUMERIC + FieldArgs nameField = TextFieldArgs. builder().name("$.name").as("name").build(); + FieldArgs descriptionField = TextFieldArgs. builder().name("$.description").as("description").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("$.price").as("price").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + .on(CreateArgs.TargetType.JSON).build(); + + String result = redis.ftCreate(ITEM_INDEX, createArgs, Arrays.asList(nameField, descriptionField, priceField)); + assertThat(result).isEqualTo("OK"); + + // Add JSON documents using JSON.SET + JsonValue item1 = redis.getJsonParser() + .createJsonValue("{\"name\":\"Noise-cancelling Bluetooth headphones\"," + + "\"description\":\"Wireless Bluetooth headphones with noise-cancelling technology\"," + + "\"connection\":{\"wireless\":true,\"type\":\"Bluetooth\"}," + "\"price\":99.98,\"stock\":25," + + "\"colors\":[\"black\",\"silver\"]}"); + + JsonValue item2 = redis.getJsonParser() + .createJsonValue("{\"name\":\"Wireless earbuds\"," + "\"description\":\"Wireless Bluetooth in-ear headphones\"," + + "\"connection\":{\"wireless\":true,\"type\":\"Bluetooth\"}," + "\"price\":64.99,\"stock\":17," + + "\"colors\":[\"black\",\"white\"]}"); + + assertThat(redis.jsonSet("item:1", JsonPath.ROOT_PATH, item1)).isEqualTo("OK"); + assertThat(redis.jsonSet("item:2", JsonPath.ROOT_PATH, item2)).isEqualTo("OK"); + + // Test 1: Search for items with "earbuds" in the name + SearchReply searchReply = redis.ftSearch(ITEM_INDEX, "@name:(earbuds)", null); + assertThat(searchReply.getCount()).isEqualTo(1); + assertThat(searchReply.getResults()).hasSize(1); + assertThat(searchReply.getResults().get(0).getId()).isEqualTo("item:2"); + + // Test 2: Search for items with "bluetooth" and "headphones" in description + searchReply = redis.ftSearch(ITEM_INDEX, "@description:(bluetooth headphones)", null); + assertThat(searchReply.getCount()).isEqualTo(2); + assertThat(searchReply.getResults()).hasSize(2); + + // Test 3: Search for Bluetooth headphones with price less than 70 + searchReply = redis.ftSearch(ITEM_INDEX, "@description:(bluetooth headphones) @price:[0 70]", null); + assertThat(searchReply.getCount()).isEqualTo(1); + assertThat(searchReply.getResults().get(0).getId()).isEqualTo("item:2"); + + // Cleanup + redis.ftDropindex(ITEM_INDEX, false); + } + + /** + * Test indexing JSON arrays as TAG fields with custom separators. Based on the Redis documentation example for indexing + * colors. + */ + @Test + void testJsonArraysAsTagFields() { + // Create index with TAG field for colors using wildcard JSONPath + // FT.CREATE itemIdx2 ON JSON PREFIX 1 item: SCHEMA $.colors.* AS colors TAG $.name AS name TEXT + FieldArgs colorsField = TagFieldArgs. builder().name("$.colors.*").as("colors").build(); + FieldArgs nameField = TextFieldArgs. builder().name("$.name").as("name").build(); + FieldArgs descriptionField = TextFieldArgs. builder().name("$.description").as("description").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + .on(CreateArgs.TargetType.JSON).build(); + + redis.ftCreate(ITEM_INDEX_2, createArgs, Arrays.asList(colorsField, nameField, descriptionField)); + + // Add sample items with color arrays + JsonValue item1 = redis.getJsonParser() + .createJsonValue("{\"name\":\"Noise-cancelling Bluetooth headphones\"," + + "\"description\":\"Wireless Bluetooth headphones with noise-cancelling technology\"," + + "\"colors\":[\"black\",\"silver\"]}"); + + JsonValue item2 = redis.getJsonParser().createJsonValue("{\"name\":\"Wireless earbuds\"," + + "\"description\":\"Wireless Bluetooth in-ear headphones\"," + "\"colors\":[\"black\",\"white\"]}"); + + JsonValue item3 = redis.getJsonParser().createJsonValue("{\"name\":\"True Wireless earbuds\"," + + "\"description\":\"True Wireless Bluetooth in-ear headphones\"," + "\"colors\":[\"red\",\"light blue\"]}"); + + redis.jsonSet("item:1", JsonPath.ROOT_PATH, item1); + redis.jsonSet("item:2", JsonPath.ROOT_PATH, item2); + redis.jsonSet("item:3", JsonPath.ROOT_PATH, item3); + + // Test 1: Search for silver headphones + SearchReply results = redis.ftSearch(ITEM_INDEX_2, + "@colors:{silver} (@name:(headphones)|@description:(headphones))", null); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getId()).isEqualTo("item:1"); + + // Test 2: Search for black items + results = redis.ftSearch(ITEM_INDEX_2, "@colors:{black}", null); + assertThat(results.getCount()).isEqualTo(2); + + // Test 3: Search for white or light colored items + results = redis.ftSearch(ITEM_INDEX_2, "@colors:{white|light}", null); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getId()).isEqualTo("item:2"); + + // Cleanup + redis.ftDropindex(ITEM_INDEX_2, false); + } + + /** + * Test indexing JSON arrays as TEXT fields for full-text search. Based on Redis documentation example for searching within + * array content. + */ + @Test + void testJsonArraysAsTextFields() { + // Create index with TEXT field for colors array + // FT.CREATE itemIdx3 ON JSON PREFIX 1 item: SCHEMA $.colors AS colors TEXT $.name AS name TEXT + FieldArgs colorsField = TextFieldArgs. builder().name("$.colors").as("colors").build(); + FieldArgs nameField = TextFieldArgs. builder().name("$.name").as("name").build(); + FieldArgs descriptionField = TextFieldArgs. builder().name("$.description").as("description").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + .on(CreateArgs.TargetType.JSON).build(); + + redis.ftCreate(ITEM_INDEX_3, createArgs, Arrays.asList(colorsField, nameField, descriptionField)); + + // Add sample items + JsonValue item2 = redis.getJsonParser().createJsonValue("{\"name\":\"Wireless earbuds\"," + + "\"description\":\"Wireless Bluetooth in-ear headphones\"," + "\"colors\":[\"black\",\"white\"]}"); + + JsonValue item3 = redis.getJsonParser().createJsonValue("{\"name\":\"True Wireless earbuds\"," + + "\"description\":\"True Wireless Bluetooth in-ear headphones\"," + "\"colors\":[\"red\",\"light blue\"]}"); + + redis.jsonSet("item:2", JsonPath.ROOT_PATH, item2); + redis.jsonSet("item:3", JsonPath.ROOT_PATH, item3); + + // Test full text search for light colored headphones + SearchArgs returnArgs = SearchArgs. builder().returnField("$.colors").build(); + SearchReply results = redis.ftSearch(ITEM_INDEX_3, + "@colors:(white|light) (@name|description:(headphones))", returnArgs); + assertThat(results.getCount()).isEqualTo(2); + assertThat(results.getResults()).hasSize(2); + + // Cleanup + redis.ftDropindex(ITEM_INDEX_3, false); + } + + /** + * Test indexing JSON arrays as NUMERIC fields for range queries. Based on Redis documentation example for indexing + * max_level arrays. + */ + @Test + void testJsonArraysAsNumericFields() { + // Create index with NUMERIC field for max_level array + // FT.CREATE itemIdx4 ON JSON PREFIX 1 item: SCHEMA $.max_level AS dB NUMERIC + FieldArgs dbField = NumericFieldArgs. builder().name("$.max_level").as("dB").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + .on(CreateArgs.TargetType.JSON).build(); + + redis.ftCreate(ITEM_INDEX_4, createArgs, Collections.singletonList(dbField)); + + // Add sample items with max_level arrays + JsonValue item1 = redis.getJsonParser() + .createJsonValue("{\"name\":\"Noise-cancelling Bluetooth headphones\"," + "\"max_level\":[60,70,80,90,100]}"); + + JsonValue item2 = redis.getJsonParser() + .createJsonValue("{\"name\":\"Wireless earbuds\"," + "\"max_level\":[80,100,120]}"); + + JsonValue item3 = redis.getJsonParser() + .createJsonValue("{\"name\":\"True Wireless earbuds\"," + "\"max_level\":[90,100,110,120]}"); + + redis.jsonSet("item:1", JsonPath.ROOT_PATH, item1); + redis.jsonSet("item:2", JsonPath.ROOT_PATH, item2); + redis.jsonSet("item:3", JsonPath.ROOT_PATH, item3); + + // Test 1: Search for headphones with max volume between 70 and 80 (inclusive) + SearchReply results = redis.ftSearch(ITEM_INDEX_4, "@dB:[70 80]", null); + assertThat(results.getCount()).isEqualTo(2); // item:1 and item:2 + + // Test 2: Search for items with all values in range [90, 120] + results = redis.ftSearch(ITEM_INDEX_4, "-@dB:[-inf (90] -@dB:[(120 +inf]", null); + assertThat(results.getCount()).isEqualTo(1); // item:3 + assertThat(results.getResults().get(0).getId()).isEqualTo("item:3"); + + // Cleanup + redis.ftDropindex(ITEM_INDEX_4, false); + } + + /** + * Test field projection with JSONPath expressions. Based on Redis documentation example for returning specific attributes. + */ + @Test + void testFieldProjectionWithJsonPath() { + // Create basic index + FieldArgs nameField = TextFieldArgs. builder().name("$.name").as("name").build(); + FieldArgs descriptionField = TextFieldArgs. builder().name("$.description").as("description").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("$.price").as("price").build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + .on(CreateArgs.TargetType.JSON).build(); + + redis.ftCreate(ITEM_INDEX, createArgs, Arrays.asList(nameField, descriptionField, priceField)); + + // Add sample items + JsonValue item1 = redis.getJsonParser() + .createJsonValue("{\"name\":\"Noise-cancelling Bluetooth headphones\"," + + "\"description\":\"Wireless Bluetooth headphones with noise-cancelling technology\"," + + "\"price\":99.98,\"stock\":25}"); + + JsonValue item2 = redis.getJsonParser().createJsonValue("{\"name\":\"Wireless earbuds\"," + + "\"description\":\"Wireless Bluetooth in-ear headphones\"," + "\"price\":64.99,\"stock\":17}"); + + redis.jsonSet("item:1", JsonPath.ROOT_PATH, item1); + redis.jsonSet("item:2", JsonPath.ROOT_PATH, item2); + + // Test 1: Return specific attributes (name and price) + SearchArgs returnArgs = SearchArgs. builder().returnField("name").returnField("price") + .build(); + SearchReply results = redis.ftSearch(ITEM_INDEX, "@description:(headphones)", returnArgs); + assertThat(results.getCount()).isEqualTo(2); + for (SearchReply.SearchResult result : results.getResults()) { + assertThat(result.getFields()).containsKey("name"); + assertThat(result.getFields()).containsKey("price"); + assertThat(result.getFields()).doesNotContainKey("description"); + } + + // Test 2: Project with JSONPath expression (including non-indexed field) + SearchArgs jsonPathArgs = SearchArgs. builder().returnField("name").returnField("price") + .returnField("$.stock") // JSONPath without alias + .build(); + results = redis.ftSearch(ITEM_INDEX, "@description:(headphones)", jsonPathArgs); + assertThat(results.getCount()).isEqualTo(2); + for (SearchReply.SearchResult result : results.getResults()) { + assertThat(result.getFields()).containsKey("name"); + assertThat(result.getFields()).containsKey("price"); + assertThat(result.getFields()).containsKey("$.stock"); + } + + // Cleanup + redis.ftDropindex(ITEM_INDEX, false); + } + + /** + * Test indexing JSON objects by indexing individual elements. Based on Redis documentation example for connection object. + */ + @Test + void testJsonObjectIndexing() { + // Create index for individual object elements + // FT.CREATE itemIdx ON JSON SCHEMA $.connection.wireless AS wireless TAG $.connection.type AS connectionType TEXT + FieldArgs wirelessField = TagFieldArgs. builder().name("$.connection.wireless").as("wireless").build(); + FieldArgs connectionTypeField = TextFieldArgs. builder().name("$.connection.type").as("connectionType") + .build(); + + CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + .on(CreateArgs.TargetType.JSON).build(); + + redis.ftCreate(ITEM_INDEX, createArgs, Arrays.asList(wirelessField, connectionTypeField)); + + // Add sample items with connection objects + JsonValue item1 = redis.getJsonParser().createJsonValue("{\"name\":\"Noise-cancelling Bluetooth headphones\"," + + "\"connection\":{\"wireless\":true,\"type\":\"Bluetooth\"}}"); + + JsonValue item2 = redis.getJsonParser() + .createJsonValue("{\"name\":\"Wired headphones\"," + "\"connection\":{\"wireless\":false,\"type\":\"3.5mm\"}}"); + + redis.jsonSet("item:1", JsonPath.ROOT_PATH, item1); + redis.jsonSet("item:2", JsonPath.ROOT_PATH, item2); + + // Test 1: Search for wireless items + SearchReply results = redis.ftSearch(ITEM_INDEX, "@wireless:{true}", null); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getId()).isEqualTo("item:1"); + + // Test 2: Search for Bluetooth connection type + results = redis.ftSearch(ITEM_INDEX, "@connectionType:(bluetooth)", null); + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults().get(0).getId()).isEqualTo("item:1"); + + // Cleanup + redis.ftDropindex(ITEM_INDEX, false); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RedisJsonIndexingResp2IntegrationTests.java b/src/test/java/io/lettuce/core/search/RedisJsonIndexingResp2IntegrationTests.java new file mode 100644 index 000000000..99686c99f --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RedisJsonIndexingResp2IntegrationTests.java @@ -0,0 +1,50 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + * + * This file contains contributions from third-party contributors + * licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.lettuce.core.search; + +import io.lettuce.core.ClientOptions; +import io.lettuce.core.protocol.ProtocolVersion; +import org.junit.jupiter.api.Tag; + +import static io.lettuce.TestTags.INTEGRATION_TEST; + +/** + * Integration tests for Redis JSON indexing functionality using RESP2 protocol. + *

+ * This test class extends {@link RedisJsonIndexingIntegrationTests} and runs all the same tests but using the RESP2 protocol + * instead of the default RESP3 protocol. + *

+ * The tests verify that Redis JSON indexing functionality works correctly with both RESP2 and RESP3 protocols, ensuring + * backward compatibility and protocol-agnostic behavior. + *

+ * Based on the Redis documentation tutorial: https://redis.io/docs/latest/develop/interact/search-and-query/indexing/ + * + * @author Tihomir Mateev + */ +@Tag(INTEGRATION_TEST) +public class RedisJsonIndexingResp2IntegrationTests extends RedisJsonIndexingIntegrationTests { + + @Override + protected ClientOptions getOptions() { + return ClientOptions.builder().protocolVersion(ProtocolVersion.RESP2).build(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/SearchResultsTest.java b/src/test/java/io/lettuce/core/search/SearchResultsTest.java new file mode 100644 index 000000000..0bdf82652 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/SearchResultsTest.java @@ -0,0 +1,120 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.jupiter.api.Test; + +/** + * Unit tests for {@link SearchReply}. + * + * @author Tihomir Mateev + */ +class SearchResultsTest { + + @Test + void testEmptySearchResults() { + SearchReply results = new SearchReply<>(); + + assertThat(results.getCount()).isEqualTo(0); + assertThat(results.getResults()).isEmpty(); + assertThat(results.size()).isEqualTo(0); + assertThat(results.isEmpty()).isTrue(); + } + + @Test + void testSearchResultsWithData() { + SearchReply results = new SearchReply<>(); + results.setCount(10); + + // Create a search result + SearchReply.SearchResult result1 = new SearchReply.SearchResult<>("doc1"); + result1.setScore(0.95); + result1.setPayload("payload1"); + result1.setSortKey("sortkey1"); + + Map fields1 = new HashMap<>(); + fields1.put("title", "Test Document 1"); + fields1.put("content", "This is test content"); + result1.addFields(fields1); + + results.addResult(result1); + + // Create another search result + SearchReply.SearchResult result2 = new SearchReply.SearchResult<>("doc2"); + result2.setScore(0.87); + + Map fields2 = new HashMap<>(); + fields2.put("title", "Test Document 2"); + fields2.put("content", "This is more test content"); + result2.addFields(fields2); + + results.addResult(result2); + + // Verify results + assertThat(results.getCount()).isEqualTo(10); + assertThat(results.size()).isEqualTo(2); + assertThat(results.isEmpty()).isFalse(); + + assertThat(results.getResults()).hasSize(2); + + SearchReply.SearchResult firstResult = results.getResults().get(0); + assertThat(firstResult.getId()).isEqualTo("doc1"); + assertThat(firstResult.getScore()).isEqualTo(0.95); + assertThat(firstResult.getPayload()).isEqualTo("payload1"); + assertThat(firstResult.getSortKey()).isEqualTo("sortkey1"); + assertThat(firstResult.getFields()).containsEntry("title", "Test Document 1"); + assertThat(firstResult.getFields()).containsEntry("content", "This is test content"); + + SearchReply.SearchResult secondResult = results.getResults().get(1); + assertThat(secondResult.getId()).isEqualTo("doc2"); + assertThat(secondResult.getScore()).isEqualTo(0.87); + assertThat(secondResult.getPayload()).isNull(); + assertThat(secondResult.getSortKey()).isNull(); + assertThat(secondResult.getFields()).containsEntry("title", "Test Document 2"); + assertThat(secondResult.getFields()).containsEntry("content", "This is more test content"); + } + + @Test + void testSearchResultsConstructorWithData() { + SearchReply.SearchResult result = new SearchReply.SearchResult<>("doc1"); + result.setScore(0.95); + + SearchReply results = new SearchReply<>(5, java.util.Arrays.asList(result)); + + assertThat(results.getCount()).isEqualTo(5); + assertThat(results.size()).isEqualTo(1); + assertThat(results.getResults().get(0).getId()).isEqualTo("doc1"); + assertThat(results.getResults().get(0).getScore()).isEqualTo(0.95); + } + + @Test + void testSearchResultImmutability() { + SearchReply results = new SearchReply<>(); + SearchReply.SearchResult result = new SearchReply.SearchResult<>("doc1"); + results.addResult(result); + + // The returned list should be unmodifiable + assertThat(results.getResults()).hasSize(1); + + // Attempting to modify the returned list should not affect the original + try { + results.getResults().clear(); + // If we reach here, the list is modifiable, which is unexpected + assertThat(false).as("Expected UnsupportedOperationException").isTrue(); + } catch (UnsupportedOperationException e) { + // This is expected - the list should be unmodifiable + assertThat(results.getResults()).hasSize(1); + } + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/CreateArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/CreateArgsTest.java new file mode 100644 index 000000000..84523c6fd --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/CreateArgsTest.java @@ -0,0 +1,198 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.List; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link CreateArgs}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class CreateArgsTest { + + @Test + void testDefaultCreateArgs() { + CreateArgs args = CreateArgs. builder().build(); + + assertThat(args.getOn()).hasValue(CreateArgs.TargetType.HASH); + assertThat(args.getPrefixes()).isEmpty(); + assertThat(args.getFilter()).isEmpty(); + assertThat(args.getDefaultLanguage()).isEmpty(); + assertThat(args.getLanguageField()).isEmpty(); + assertThat(args.getDefaultScore()).isEmpty(); + assertThat(args.getScoreField()).isEmpty(); + assertThat(args.getPayloadField()).isEmpty(); + assertThat(args.isMaxTextFields()).isFalse(); + assertThat(args.getTemporary()).isEmpty(); + assertThat(args.isNoOffsets()).isFalse(); + assertThat(args.isNoHighlight()).isFalse(); + assertThat(args.isNoFields()).isFalse(); + assertThat(args.isNoFrequency()).isFalse(); + assertThat(args.isSkipInitialScan()).isFalse(); + assertThat(args.getStopWords()).isEmpty(); + } + + @Test + void testCreateArgsWithTargetType() { + CreateArgs hashArgs = CreateArgs. builder().on(CreateArgs.TargetType.HASH).build(); + assertThat(hashArgs.getOn()).hasValue(CreateArgs.TargetType.HASH); + + CreateArgs jsonArgs = CreateArgs. builder().on(CreateArgs.TargetType.JSON).build(); + assertThat(jsonArgs.getOn()).hasValue(CreateArgs.TargetType.JSON); + } + + @Test + void testCreateArgsWithPrefixes() { + CreateArgs args = CreateArgs. builder().addPrefix("blog:").addPrefix("post:") + .addPrefix("article:").build(); + + assertThat(args.getPrefixes()).containsExactly("blog:", "post:", "article:"); + } + + @Test + void testCreateArgsWithFilter() { + CreateArgs args = CreateArgs. builder().filter("@status:published").build(); + + assertThat(args.getFilter()).hasValue("@status:published"); + } + + @Test + void testCreateArgsWithLanguageSettings() { + CreateArgs args = CreateArgs. builder().defaultLanguage(DocumentLanguage.ENGLISH) + .languageField("lang").build(); + + assertThat(args.getDefaultLanguage()).hasValue(DocumentLanguage.ENGLISH); + assertThat(args.getLanguageField()).hasValue("lang"); + } + + @Test + void testCreateArgsWithScoreSettings() { + CreateArgs args = CreateArgs. builder().defaultScore(0.5).scoreField("score").build(); + + assertThat(args.getDefaultScore()).hasValue(0.5); + assertThat(args.getScoreField()).hasValue("score"); + } + + @Test + void testCreateArgsWithPayloadField() { + CreateArgs args = CreateArgs. builder().payloadField("payload").build(); + + assertThat(args.getPayloadField()).hasValue("payload"); + } + + @Test + void testCreateArgsWithFlags() { + CreateArgs args = CreateArgs. builder().maxTextFields(true).noOffsets(true) + .noHighlighting(true).noFields(true).noFrequency(true).skipInitialScan(true).build(); + + assertThat(args.isMaxTextFields()).isTrue(); + assertThat(args.isNoOffsets()).isTrue(); + assertThat(args.isNoHighlight()).isTrue(); + assertThat(args.isNoFields()).isTrue(); + assertThat(args.isNoFrequency()).isTrue(); + assertThat(args.isSkipInitialScan()).isTrue(); + } + + @Test + void testCreateArgsWithTemporary() { + CreateArgs args = CreateArgs. builder().temporary(3600).build(); + + assertThat(args.getTemporary()).hasValue(3600L); + } + + @Test + void testCreateArgsWithStopWords() { + List stopWords = Arrays.asList("the", "and", "or", "but"); + CreateArgs args = CreateArgs. builder().stopWords(stopWords).build(); + + assertThat(args.getStopWords()).hasValue(stopWords); + } + + @Test + void testCreateArgsWithEmptyStopWords() { + CreateArgs args = CreateArgs. builder().stopWords(Arrays.asList()).build(); + + assertThat(args.getStopWords()).hasValue(Arrays.asList()); + } + + @Test + void testCreateArgsBuild() { + CreateArgs args = CreateArgs. builder().on(CreateArgs.TargetType.JSON) + .addPrefix("blog:").addPrefix("post:").filter("@status:published").defaultLanguage(DocumentLanguage.FRENCH) + .languageField("lang").defaultScore(0.8).scoreField("score").payloadField("payload").maxTextFields(true) + .temporary(7200).noOffsets(true).noHighlighting(true).noFields(true).noFrequency(true).skipInitialScan(true) + .stopWords(Arrays.asList("le", "la", "et")).build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("JSON"); + assertThat(argsString).contains("PREFIX"); + assertThat(argsString).contains("2"); + assertThat(argsString).contains("FILTER"); + assertThat(argsString).contains("LANGUAGE"); + assertThat(argsString).contains("french"); + assertThat(argsString).contains("LANGUAGE_FIELD"); + assertThat(argsString).contains("SCORE"); + assertThat(argsString).contains("0.8"); + assertThat(argsString).contains("SCORE_FIELD"); + assertThat(argsString).contains("PAYLOAD_FIELD"); + assertThat(argsString).contains("MAXTEXTFIELDS"); + assertThat(argsString).contains("TEMPORARY"); + assertThat(argsString).contains("7200"); + assertThat(argsString).contains("NOOFFSETS"); + assertThat(argsString).contains("NOHL"); + assertThat(argsString).contains("NOFIELDS"); + assertThat(argsString).contains("NOFREQS"); + assertThat(argsString).contains("SKIPINITIALSCAN"); + assertThat(argsString).contains("STOPWORDS"); + assertThat(argsString).contains("3"); + assertThat(argsString).contains("le"); + assertThat(argsString).contains("la"); + assertThat(argsString).contains("et"); + + } + + @Test + void testCreateArgsMinimalBuild() { + CreateArgs args = CreateArgs. builder().addPrefix("test:").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("HASH"); // Default target type + assertThat(argsString).contains("PREFIX"); + assertThat(argsString).contains("1"); + assertThat(argsString).doesNotContain("FILTER"); + assertThat(argsString).doesNotContain("LANGUAGE"); + assertThat(argsString).doesNotContain("SCORE"); + assertThat(argsString).doesNotContain("TEMPORARY"); + assertThat(argsString).doesNotContain("STOPWORDS"); + } + + @Test + void testTargetTypeEnum() { + assertThat(CreateArgs.TargetType.HASH.name()).isEqualTo("HASH"); + assertThat(CreateArgs.TargetType.JSON.name()).isEqualTo("JSON"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/FieldArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/FieldArgsTest.java new file mode 100644 index 000000000..0d50a0977 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/FieldArgsTest.java @@ -0,0 +1,195 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link FieldArgs} and its concrete implementations. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class FieldArgsTest { + + /** + * Concrete implementation of FieldArgs for testing purposes. + */ + private static class TestFieldArgs extends FieldArgs { + + @Override + public String getFieldType() { + return "TEST"; + } + + @Override + protected void buildTypeSpecificArgs(CommandArgs args) { + // No type-specific arguments for test field + } + + public static Builder builder() { + return new Builder<>(); + } + + public static class Builder extends FieldArgs.Builder, Builder> { + + public Builder() { + super(new TestFieldArgs<>()); + } + + } + + } + + @Test + void testDefaultFieldArgs() { + TestFieldArgs field = TestFieldArgs. builder().name("test_field").build(); + + assertThat(field.getName()).isEqualTo("test_field"); + assertThat(field.getAs()).isEmpty(); + assertThat(field.isSortable()).isFalse(); + assertThat(field.isUnNormalizedForm()).isFalse(); + assertThat(field.isNoIndex()).isFalse(); + assertThat(field.isIndexEmpty()).isFalse(); + assertThat(field.isIndexMissing()).isFalse(); + assertThat(field.getFieldType()).isEqualTo("TEST"); + } + + @Test + void testFieldArgsWithAlias() { + TestFieldArgs field = TestFieldArgs. builder().name("complex_field_name").as("simple_alias").build(); + + assertThat(field.getName()).isEqualTo("complex_field_name"); + assertThat(field.getAs()).hasValue("simple_alias"); + } + + @Test + void testFieldArgsWithSortable() { + TestFieldArgs field = TestFieldArgs. builder().name("sortable_field").sortable().build(); + + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isFalse(); + } + + @Test + void testFieldArgsWithSortableAndUnnormalized() { + TestFieldArgs field = TestFieldArgs. builder().name("sortable_field").sortable().unNormalizedForm() + .build(); + + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + } + + @Test + void testFieldArgsWithNoIndex() { + TestFieldArgs field = TestFieldArgs. builder().name("no_index_field").noIndex().build(); + + assertThat(field.isNoIndex()).isTrue(); + } + + @Test + void testFieldArgsWithIndexEmpty() { + TestFieldArgs field = TestFieldArgs. builder().name("index_empty_field").indexEmpty().build(); + + assertThat(field.isIndexEmpty()).isTrue(); + } + + @Test + void testFieldArgsWithIndexMissing() { + TestFieldArgs field = TestFieldArgs. builder().name("index_missing_field").indexMissing().build(); + + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testFieldArgsWithAllOptions() { + TestFieldArgs field = TestFieldArgs. builder().name("full_field").as("alias").sortable() + .unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("full_field"); + assertThat(field.getAs()).hasValue("alias"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testFieldArgsBuild() { + TestFieldArgs field = TestFieldArgs. builder().name("test_field").as("alias").sortable() + .unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("test_field"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("alias"); + assertThat(argsString).contains("TEST"); // Field type + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).contains("UNF"); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).contains("INDEXEMPTY"); + assertThat(argsString).contains("INDEXMISSING"); + } + + @Test + void testFieldArgsMinimalBuild() { + TestFieldArgs field = TestFieldArgs. builder().name("simple_field").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("simple_field"); + assertThat(argsString).contains("TEST"); // Field type + assertThat(argsString).doesNotContain("AS"); + assertThat(argsString).doesNotContain("SORTABLE"); + assertThat(argsString).doesNotContain("UNF"); + assertThat(argsString).doesNotContain("NOINDEX"); + assertThat(argsString).doesNotContain("INDEXEMPTY"); + assertThat(argsString).doesNotContain("INDEXMISSING"); + } + + @Test + void testFieldArgsSortableWithoutUnnormalized() { + TestFieldArgs field = TestFieldArgs. builder().name("sortable_field").sortable().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).doesNotContain("UNF"); // UNF should only appear with SORTABLE + } + + @Test + void testBuilderMethodChaining() { + // Test that builder methods return the correct type for method chaining + TestFieldArgs field = TestFieldArgs. builder().name("chained_field").as("chained_alias").sortable() + .unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("chained_field"); + assertThat(field.getAs()).hasValue("chained_alias"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/GeoFieldArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/GeoFieldArgsTest.java new file mode 100644 index 000000000..eec3e5331 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/GeoFieldArgsTest.java @@ -0,0 +1,224 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link GeoFieldArgs}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class GeoFieldArgsTest { + + @Test + void testDefaultGeoFieldArgs() { + GeoFieldArgs field = GeoFieldArgs. builder().name("location").build(); + + assertThat(field.getName()).isEqualTo("location"); + assertThat(field.getFieldType()).isEqualTo("GEO"); + assertThat(field.getAs()).isEmpty(); + assertThat(field.isSortable()).isFalse(); + assertThat(field.isUnNormalizedForm()).isFalse(); + assertThat(field.isNoIndex()).isFalse(); + assertThat(field.isIndexEmpty()).isFalse(); + assertThat(field.isIndexMissing()).isFalse(); + } + + @Test + void testGeoFieldArgsWithAlias() { + GeoFieldArgs field = GeoFieldArgs. builder().name("coordinates").as("location").build(); + + assertThat(field.getName()).isEqualTo("coordinates"); + assertThat(field.getAs()).hasValue("location"); + assertThat(field.getFieldType()).isEqualTo("GEO"); + } + + @Test + void testGeoFieldArgsWithSortable() { + GeoFieldArgs field = GeoFieldArgs. builder().name("position").sortable().build(); + + assertThat(field.getName()).isEqualTo("position"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isFalse(); + } + + @Test + void testGeoFieldArgsWithSortableAndUnnormalized() { + GeoFieldArgs field = GeoFieldArgs. builder().name("geo_point").sortable().unNormalizedForm().build(); + + assertThat(field.getName()).isEqualTo("geo_point"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + } + + @Test + void testGeoFieldArgsWithNoIndex() { + GeoFieldArgs field = GeoFieldArgs. builder().name("internal_location").noIndex().build(); + + assertThat(field.getName()).isEqualTo("internal_location"); + assertThat(field.isNoIndex()).isTrue(); + } + + @Test + void testGeoFieldArgsWithIndexEmpty() { + GeoFieldArgs field = GeoFieldArgs. builder().name("optional_location").indexEmpty().build(); + + assertThat(field.getName()).isEqualTo("optional_location"); + assertThat(field.isIndexEmpty()).isTrue(); + } + + @Test + void testGeoFieldArgsWithIndexMissing() { + GeoFieldArgs field = GeoFieldArgs. builder().name("nullable_location").indexMissing().build(); + + assertThat(field.getName()).isEqualTo("nullable_location"); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testGeoFieldArgsWithAllOptions() { + GeoFieldArgs field = GeoFieldArgs. builder().name("comprehensive_geo").as("geo").sortable() + .unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("comprehensive_geo"); + assertThat(field.getAs()).hasValue("geo"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testGeoFieldArgsBuild() { + GeoFieldArgs field = GeoFieldArgs. builder().name("store_location").as("location").sortable() + .unNormalizedForm().indexEmpty().indexMissing().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("store_location"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("location"); + assertThat(argsString).contains("GEO"); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).contains("UNF"); + assertThat(argsString).contains("INDEXEMPTY"); + assertThat(argsString).contains("INDEXMISSING"); + } + + @Test + void testGeoFieldArgsMinimalBuild() { + GeoFieldArgs field = GeoFieldArgs. builder().name("simple_geo").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("simple_geo"); + assertThat(argsString).contains("GEO"); + assertThat(argsString).doesNotContain("AS"); + assertThat(argsString).doesNotContain("SORTABLE"); + assertThat(argsString).doesNotContain("UNF"); + assertThat(argsString).doesNotContain("NOINDEX"); + assertThat(argsString).doesNotContain("INDEXEMPTY"); + assertThat(argsString).doesNotContain("INDEXMISSING"); + } + + @Test + void testGeoFieldArgsSortableWithoutUnnormalized() { + GeoFieldArgs field = GeoFieldArgs. builder().name("sortable_geo").sortable().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).doesNotContain("UNF"); // UNF should only appear with SORTABLE when explicitly set + } + + @Test + void testGeoFieldArgsWithNoIndexOnly() { + GeoFieldArgs field = GeoFieldArgs. builder().name("no_index_geo").noIndex().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).doesNotContain("SORTABLE"); + assertThat(argsString).doesNotContain("INDEXEMPTY"); + assertThat(argsString).doesNotContain("INDEXMISSING"); + } + + @Test + void testBuilderMethodChaining() { + // Test that builder methods return the correct type for method chaining + GeoFieldArgs field = GeoFieldArgs. builder().name("chained_geo").as("chained_alias").sortable() + .unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("chained_geo"); + assertThat(field.getAs()).hasValue("chained_alias"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testGeoFieldArgsTypeSpecificBehavior() { + // Test that geo fields don't have type-specific arguments beyond common ones + GeoFieldArgs field = GeoFieldArgs. builder().name("geo_field").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + // Should only contain field name and type, no geo-specific arguments + assertThat(argsString).contains("geo_field"); + assertThat(argsString).contains("GEO"); + // Should not contain any text-specific, tag-specific, or numeric-specific arguments + assertThat(argsString).doesNotContain("WEIGHT"); + assertThat(argsString).doesNotContain("NOSTEM"); + assertThat(argsString).doesNotContain("PHONETIC"); + assertThat(argsString).doesNotContain("SEPARATOR"); + assertThat(argsString).doesNotContain("CASESENSITIVE"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + } + + @Test + void testGeoFieldArgsInheritedMethods() { + // Test that inherited methods from FieldArgs work correctly + GeoFieldArgs field = GeoFieldArgs. builder().name("inherited_geo").noIndex().indexEmpty().indexMissing() + .build(); + + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).contains("INDEXEMPTY"); + assertThat(argsString).contains("INDEXMISSING"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/GeoshapeFieldArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/GeoshapeFieldArgsTest.java new file mode 100644 index 000000000..ebe249884 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/GeoshapeFieldArgsTest.java @@ -0,0 +1,243 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link GeoshapeFieldArgs}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class GeoshapeFieldArgsTest { + + @Test + void testDefaultGeoshapeFieldArgs() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("geometry").build(); + + assertThat(field.getName()).isEqualTo("geometry"); + assertThat(field.getFieldType()).isEqualTo("GEOSHAPE"); + assertThat(field.getCoordinateSystem()).isEmpty(); + assertThat(field.getAs()).isEmpty(); + assertThat(field.isSortable()).isFalse(); + assertThat(field.isUnNormalizedForm()).isFalse(); + assertThat(field.isNoIndex()).isFalse(); + assertThat(field.isIndexEmpty()).isFalse(); + assertThat(field.isIndexMissing()).isFalse(); + } + + @Test + void testGeoshapeFieldArgsWithSpherical() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("shape").spherical().build(); + + assertThat(field.getName()).isEqualTo("shape"); + assertThat(field.getCoordinateSystem()).hasValue(GeoshapeFieldArgs.CoordinateSystem.SPHERICAL); + } + + @Test + void testGeoshapeFieldArgsWithFlat() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("polygon").flat().build(); + + assertThat(field.getName()).isEqualTo("polygon"); + assertThat(field.getCoordinateSystem()).hasValue(GeoshapeFieldArgs.CoordinateSystem.FLAT); + } + + @Test + void testGeoshapeFieldArgsWithAlias() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("complex_geometry").as("geom").build(); + + assertThat(field.getName()).isEqualTo("complex_geometry"); + assertThat(field.getAs()).hasValue("geom"); + assertThat(field.getFieldType()).isEqualTo("GEOSHAPE"); + } + + @Test + void testGeoshapeFieldArgsWithSortable() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("sortable_shape").sortable().build(); + + assertThat(field.getName()).isEqualTo("sortable_shape"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isFalse(); + } + + @Test + void testGeoshapeFieldArgsWithAllOptions() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("comprehensive_geoshape").as("shape").flat() + .sortable().unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("comprehensive_geoshape"); + assertThat(field.getAs()).hasValue("shape"); + assertThat(field.getCoordinateSystem()).hasValue(GeoshapeFieldArgs.CoordinateSystem.FLAT); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testCoordinateSystemEnum() { + assertThat(GeoshapeFieldArgs.CoordinateSystem.FLAT.name()).isEqualTo("FLAT"); + assertThat(GeoshapeFieldArgs.CoordinateSystem.SPHERICAL.name()).isEqualTo("SPHERICAL"); + } + + @Test + void testGeoshapeFieldArgsBuildWithSpherical() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("spherical_shape").as("shape").spherical() + .sortable().indexEmpty().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("spherical_shape"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("shape"); + assertThat(argsString).contains("GEOSHAPE"); + assertThat(argsString).contains("SPHERICAL"); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).contains("INDEXEMPTY"); + } + + @Test + void testGeoshapeFieldArgsBuildWithFlat() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("flat_shape").as("cartesian").flat() + .sortable().unNormalizedForm().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("flat_shape"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("cartesian"); + assertThat(argsString).contains("GEOSHAPE"); + assertThat(argsString).contains("FLAT"); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).contains("UNF"); + } + + @Test + void testGeoshapeFieldArgsMinimalBuild() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("simple_geoshape").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("simple_geoshape"); + assertThat(argsString).contains("GEOSHAPE"); + assertThat(argsString).doesNotContain("AS"); + assertThat(argsString).doesNotContain("SPHERICAL"); + assertThat(argsString).doesNotContain("FLAT"); + assertThat(argsString).doesNotContain("SORTABLE"); + assertThat(argsString).doesNotContain("UNF"); + assertThat(argsString).doesNotContain("NOINDEX"); + assertThat(argsString).doesNotContain("INDEXEMPTY"); + assertThat(argsString).doesNotContain("INDEXMISSING"); + } + + @Test + void testGeoshapeFieldArgsWithNoIndex() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("no_index_shape").noIndex().build(); + + assertThat(field.getName()).isEqualTo("no_index_shape"); + assertThat(field.isNoIndex()).isTrue(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).doesNotContain("SORTABLE"); + assertThat(argsString).doesNotContain("INDEXEMPTY"); + assertThat(argsString).doesNotContain("INDEXMISSING"); + } + + @Test + void testGeoshapeFieldArgsWithIndexEmpty() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("index_empty_shape").indexEmpty().build(); + + assertThat(field.getName()).isEqualTo("index_empty_shape"); + assertThat(field.isIndexEmpty()).isTrue(); + } + + @Test + void testGeoshapeFieldArgsWithIndexMissing() { + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("index_missing_shape").indexMissing() + .build(); + + assertThat(field.getName()).isEqualTo("index_missing_shape"); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testBuilderMethodChaining() { + // Test that builder methods return the correct type for method chaining + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("chained_geoshape").as("chained_alias") + .spherical().sortable().unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("chained_geoshape"); + assertThat(field.getAs()).hasValue("chained_alias"); + assertThat(field.getCoordinateSystem()).hasValue(GeoshapeFieldArgs.CoordinateSystem.SPHERICAL); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testGeoshapeFieldArgsTypeSpecificBehavior() { + // Test that geoshape fields have their specific arguments and not others + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("geoshape_field").flat().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + // Should contain geoshape-specific arguments + assertThat(argsString).contains("GEOSHAPE"); + assertThat(argsString).contains("FLAT"); + // Should not contain text-specific, tag-specific, or numeric-specific arguments + assertThat(argsString).doesNotContain("WEIGHT"); + assertThat(argsString).doesNotContain("NOSTEM"); + assertThat(argsString).doesNotContain("PHONETIC"); + assertThat(argsString).doesNotContain("SEPARATOR"); + assertThat(argsString).doesNotContain("CASESENSITIVE"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + } + + @Test + void testGeoshapeFieldArgsInheritedMethods() { + // Test that inherited methods from FieldArgs work correctly + GeoshapeFieldArgs field = GeoshapeFieldArgs. builder().name("inherited_geoshape").noIndex().indexEmpty() + .indexMissing().build(); + + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).contains("INDEXEMPTY"); + assertThat(argsString).contains("INDEXMISSING"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/NumericFieldArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/NumericFieldArgsTest.java new file mode 100644 index 000000000..a28c839a4 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/NumericFieldArgsTest.java @@ -0,0 +1,205 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link NumericFieldArgs}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class NumericFieldArgsTest { + + @Test + void testDefaultNumericFieldArgs() { + NumericFieldArgs field = NumericFieldArgs. builder().name("price").build(); + + assertThat(field.getName()).isEqualTo("price"); + assertThat(field.getFieldType()).isEqualTo("NUMERIC"); + assertThat(field.getAs()).isEmpty(); + assertThat(field.isSortable()).isFalse(); + assertThat(field.isUnNormalizedForm()).isFalse(); + assertThat(field.isNoIndex()).isFalse(); + assertThat(field.isIndexEmpty()).isFalse(); + assertThat(field.isIndexMissing()).isFalse(); + } + + @Test + void testNumericFieldArgsWithAlias() { + NumericFieldArgs field = NumericFieldArgs. builder().name("product_price").as("price").build(); + + assertThat(field.getName()).isEqualTo("product_price"); + assertThat(field.getAs()).hasValue("price"); + assertThat(field.getFieldType()).isEqualTo("NUMERIC"); + } + + @Test + void testNumericFieldArgsWithSortable() { + NumericFieldArgs field = NumericFieldArgs. builder().name("rating").sortable().build(); + + assertThat(field.getName()).isEqualTo("rating"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isFalse(); + } + + @Test + void testNumericFieldArgsWithSortableAndUnnormalized() { + NumericFieldArgs field = NumericFieldArgs. builder().name("score").sortable().unNormalizedForm() + .build(); + + assertThat(field.getName()).isEqualTo("score"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + } + + @Test + void testNumericFieldArgsWithNoIndex() { + NumericFieldArgs field = NumericFieldArgs. builder().name("internal_id").noIndex().build(); + + assertThat(field.getName()).isEqualTo("internal_id"); + assertThat(field.isNoIndex()).isTrue(); + } + + @Test + void testNumericFieldArgsWithIndexEmpty() { + NumericFieldArgs field = NumericFieldArgs. builder().name("optional_value").indexEmpty().build(); + + assertThat(field.getName()).isEqualTo("optional_value"); + assertThat(field.isIndexEmpty()).isTrue(); + } + + @Test + void testNumericFieldArgsWithIndexMissing() { + NumericFieldArgs field = NumericFieldArgs. builder().name("nullable_field").indexMissing().build(); + + assertThat(field.getName()).isEqualTo("nullable_field"); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testNumericFieldArgsWithAllOptions() { + NumericFieldArgs field = NumericFieldArgs. builder().name("comprehensive_numeric").as("num").sortable() + .unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("comprehensive_numeric"); + assertThat(field.getAs()).hasValue("num"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testNumericFieldArgsBuild() { + NumericFieldArgs field = NumericFieldArgs. builder().name("amount").as("total_amount").sortable() + .unNormalizedForm().indexEmpty().indexMissing().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("amount"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("total_amount"); + assertThat(argsString).contains("NUMERIC"); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).contains("UNF"); + assertThat(argsString).contains("INDEXEMPTY"); + assertThat(argsString).contains("INDEXMISSING"); + } + + @Test + void testNumericFieldArgsMinimalBuild() { + NumericFieldArgs field = NumericFieldArgs. builder().name("simple_number").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("simple_number"); + assertThat(argsString).contains("NUMERIC"); + assertThat(argsString).doesNotContain("AS"); + assertThat(argsString).doesNotContain("SORTABLE"); + assertThat(argsString).doesNotContain("UNF"); + assertThat(argsString).doesNotContain("NOINDEX"); + assertThat(argsString).doesNotContain("INDEXEMPTY"); + assertThat(argsString).doesNotContain("INDEXMISSING"); + } + + @Test + void testNumericFieldArgsSortableWithoutUnnormalized() { + NumericFieldArgs field = NumericFieldArgs. builder().name("sortable_number").sortable().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).doesNotContain("UNF"); // UNF should only appear with SORTABLE when explicitly set + } + + @Test + void testNumericFieldArgsWithNoIndexOnly() { + NumericFieldArgs field = NumericFieldArgs. builder().name("no_index_number").noIndex().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).doesNotContain("SORTABLE"); + assertThat(argsString).doesNotContain("INDEXEMPTY"); + assertThat(argsString).doesNotContain("INDEXMISSING"); + } + + @Test + void testBuilderMethodChaining() { + // Test that builder methods return the correct type for method chaining + NumericFieldArgs field = NumericFieldArgs. builder().name("chained_numeric").as("chained_alias") + .sortable().unNormalizedForm().noIndex().indexEmpty().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("chained_numeric"); + assertThat(field.getAs()).hasValue("chained_alias"); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testNumericFieldArgsTypeSpecificBehavior() { + // Test that numeric fields don't have type-specific arguments beyond common ones + NumericFieldArgs field = NumericFieldArgs. builder().name("numeric_field").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + // Should only contain field name and type, no numeric-specific arguments + assertThat(argsString).contains("numeric_field"); + assertThat(argsString).contains("NUMERIC"); + // Should not contain any text-specific or tag-specific arguments + assertThat(argsString).doesNotContain("WEIGHT"); + assertThat(argsString).doesNotContain("NOSTEM"); + assertThat(argsString).doesNotContain("PHONETIC"); + assertThat(argsString).doesNotContain("SEPARATOR"); + assertThat(argsString).doesNotContain("CASESENSITIVE"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/SearchArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/SearchArgsTest.java new file mode 100644 index 000000000..60ffcf773 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/SearchArgsTest.java @@ -0,0 +1,138 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; + +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link SearchArgs}. + * + * @author Tihomir Mateev + */ +class SearchArgsTest { + + @Test + void testDefaultSearchArgs() { + SearchArgs args = SearchArgs. builder().build(); + + assertThat(args.isNoContent()).isFalse(); + assertThat(args.isWithScores()).isFalse(); + assertThat(args.isWithPayloads()).isFalse(); + assertThat(args.isWithSortKeys()).isFalse(); + } + + @Test + void testSearchArgsWithOptions() { + SearchArgs args = SearchArgs. builder().noContent().withScores().withPayloads() + .withSortKeys().verbatim().noStopWords().build(); + + assertThat(args.isNoContent()).isTrue(); + assertThat(args.isWithScores()).isTrue(); + assertThat(args.isWithPayloads()).isTrue(); + assertThat(args.isWithSortKeys()).isTrue(); + } + + @Test + void testSearchArgsWithFields() { + SearchArgs args = SearchArgs. builder().inKey("key1").inKey("key2").inField("field1") + .inField("field2").returnField("title").returnField("content", "text").build(); + + // Test that the args can be built without errors + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + // The command args should contain the appropriate keywords + String argsString = commandArgs.toString(); + assertThat(argsString).contains("INKEYS"); + assertThat(argsString).contains("INFIELDS"); + assertThat(argsString).contains("RETURN"); + } + + @Test + void testSearchArgsWithLimitAndTimeout() { + SearchArgs args = SearchArgs. builder().limit(10, 20).timeout(Duration.ofSeconds(5)) + .slop(2).inOrder().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("LIMIT"); + assertThat(argsString).contains("TIMEOUT"); + assertThat(argsString).contains("SLOP"); + assertThat(argsString).contains("INORDER"); + } + + @Test + void testSearchArgsWithLanguageAndScoring() { + SearchArgs args = SearchArgs. builder().language(DocumentLanguage.ENGLISH) + .scorer(ScoringFunction.TF_IDF).payload("test-payload").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("LANGUAGE"); + assertThat(argsString).contains("SCORER"); + assertThat(argsString).contains("PAYLOAD"); + } + + @Test + void testSearchArgsWithParams() { + SearchArgs args = SearchArgs. builder().param("param1", "value1") + .param("param2", "value2").dialect(QueryDialects.DIALECT3).build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("PARAMS"); + assertThat(argsString).contains("DIALECT"); + assertThat(argsString).contains("3"); // DIALECT3 + } + + @Test + void testSearchArgsWithSortBy() { + SortByArgs sortBy = SortByArgs. builder().attribute("score").descending().build(); + + SearchArgs args = SearchArgs. builder().sortBy(sortBy).build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("SORTBY"); + } + + @Test + void testSearchArgsWithHighlightAndSummarize() { + HighlightArgs highlight = HighlightArgs. builder().field("title").tags("", "") + .build(); + + SummarizeArgs summarize = SummarizeArgs. builder().field("content").fragments(3) + .len(100).separator("...").build(); + + SearchArgs args = SearchArgs. builder().highlight(highlight).summarize(summarize) + .build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + args.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("HIGHLIGHT"); + assertThat(argsString).contains("SUMMARIZE"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/TagFieldArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/TagFieldArgsTest.java new file mode 100644 index 000000000..73487b3d8 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/TagFieldArgsTest.java @@ -0,0 +1,223 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link TagFieldArgs}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class TagFieldArgsTest { + + @Test + void testDefaultTagFieldArgs() { + TagFieldArgs field = TagFieldArgs. builder().name("category").build(); + + assertThat(field.getName()).isEqualTo("category"); + assertThat(field.getFieldType()).isEqualTo("TAG"); + assertThat(field.getSeparator()).isEmpty(); + assertThat(field.isCaseSensitive()).isFalse(); + assertThat(field.isWithSuffixTrie()).isFalse(); + } + + @Test + void testTagFieldArgsWithSeparator() { + TagFieldArgs field = TagFieldArgs. builder().name("tags").separator("|").build(); + + assertThat(field.getName()).isEqualTo("tags"); + assertThat(field.getSeparator()).hasValue("|"); + } + + @Test + void testTagFieldArgsWithCaseSensitive() { + TagFieldArgs field = TagFieldArgs. builder().name("status").caseSensitive().build(); + + assertThat(field.getName()).isEqualTo("status"); + assertThat(field.isCaseSensitive()).isTrue(); + } + + @Test + void testTagFieldArgsWithSuffixTrie() { + TagFieldArgs field = TagFieldArgs. builder().name("keywords").withSuffixTrie().build(); + + assertThat(field.getName()).isEqualTo("keywords"); + assertThat(field.isWithSuffixTrie()).isTrue(); + } + + @Test + void testTagFieldArgsWithAllOptions() { + TagFieldArgs field = TagFieldArgs. builder().name("complex_tags").as("tags").separator(";") + .caseSensitive().withSuffixTrie().sortable().unNormalizedForm().build(); + + assertThat(field.getName()).isEqualTo("complex_tags"); + assertThat(field.getAs()).hasValue("tags"); + assertThat(field.getSeparator()).hasValue(";"); + assertThat(field.isCaseSensitive()).isTrue(); + assertThat(field.isWithSuffixTrie()).isTrue(); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isUnNormalizedForm()).isTrue(); + } + + @Test + void testTagFieldArgsBuild() { + TagFieldArgs field = TagFieldArgs. builder().name("labels").as("tag_labels").separator(",") + .caseSensitive().withSuffixTrie().sortable().indexEmpty().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("labels"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("tag_labels"); + assertThat(argsString).contains("TAG"); + assertThat(argsString).contains("SEPARATOR"); + assertThat(argsString).contains(","); + assertThat(argsString).contains("CASESENSITIVE"); + assertThat(argsString).contains("WITHSUFFIXTRIE"); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).contains("INDEXEMPTY"); + } + + @Test + void testTagFieldArgsMinimalBuild() { + TagFieldArgs field = TagFieldArgs. builder().name("simple_tag").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("simple_tag"); + assertThat(argsString).contains("TAG"); + assertThat(argsString).doesNotContain("SEPARATOR"); + assertThat(argsString).doesNotContain("CASESENSITIVE"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + assertThat(argsString).doesNotContain("SORTABLE"); + } + + @Test + void testTagFieldArgsWithSeparatorOnly() { + TagFieldArgs field = TagFieldArgs. builder().name("pipe_separated").separator("|").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("SEPARATOR"); + assertThat(argsString).contains("|"); + assertThat(argsString).doesNotContain("CASESENSITIVE"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + } + + @Test + void testTagFieldArgsWithCaseSensitiveOnly() { + TagFieldArgs field = TagFieldArgs. builder().name("case_sensitive_tag").caseSensitive().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("CASESENSITIVE"); + assertThat(argsString).doesNotContain("SEPARATOR"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + } + + @Test + void testTagFieldArgsWithSuffixTrieOnly() { + TagFieldArgs field = TagFieldArgs. builder().name("suffix_trie_tag").withSuffixTrie().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("WITHSUFFIXTRIE"); + assertThat(argsString).doesNotContain("SEPARATOR"); + assertThat(argsString).doesNotContain("CASESENSITIVE"); + } + + @Test + void testTagFieldArgsWithCustomSeparators() { + // Test various separator characters + TagFieldArgs commaField = TagFieldArgs. builder().name("comma_tags").separator(",").build(); + TagFieldArgs pipeField = TagFieldArgs. builder().name("pipe_tags").separator("|").build(); + TagFieldArgs semicolonField = TagFieldArgs. builder().name("semicolon_tags").separator(";").build(); + TagFieldArgs spaceField = TagFieldArgs. builder().name("space_tags").separator(" ").build(); + + assertThat(commaField.getSeparator()).hasValue(","); + assertThat(pipeField.getSeparator()).hasValue("|"); + assertThat(semicolonField.getSeparator()).hasValue(";"); + assertThat(spaceField.getSeparator()).hasValue(" "); + } + + @Test + void testBuilderMethodChaining() { + // Test that builder methods return the correct type for method chaining + TagFieldArgs field = TagFieldArgs. builder().name("chained_tag").as("chained_alias").separator(":") + .caseSensitive().withSuffixTrie().sortable().noIndex().indexMissing().build(); + + assertThat(field.getName()).isEqualTo("chained_tag"); + assertThat(field.getAs()).hasValue("chained_alias"); + assertThat(field.getSeparator()).hasValue(":"); + assertThat(field.isCaseSensitive()).isTrue(); + assertThat(field.isWithSuffixTrie()).isTrue(); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + } + + @Test + void testTagFieldArgsInheritedMethods() { + // Test that inherited methods from FieldArgs work correctly + TagFieldArgs field = TagFieldArgs. builder().name("inherited_tag").noIndex().indexEmpty().indexMissing() + .build(); + + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).contains("INDEXEMPTY"); + assertThat(argsString).contains("INDEXMISSING"); + } + + @Test + void testTagFieldArgsTypeSpecificBehavior() { + // Test that tag fields have their specific arguments and not others + TagFieldArgs field = TagFieldArgs. builder().name("tag_field").separator(",").caseSensitive() + .withSuffixTrie().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + // Should contain tag-specific arguments + assertThat(argsString).contains("TAG"); + assertThat(argsString).contains("SEPARATOR"); + assertThat(argsString).contains("CASESENSITIVE"); + assertThat(argsString).contains("WITHSUFFIXTRIE"); + // Should not contain text-specific or numeric-specific arguments + assertThat(argsString).doesNotContain("WEIGHT"); + assertThat(argsString).doesNotContain("NOSTEM"); + assertThat(argsString).doesNotContain("PHONETIC"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/TextFieldArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/TextFieldArgsTest.java new file mode 100644 index 000000000..1fac43859 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/TextFieldArgsTest.java @@ -0,0 +1,195 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link TextFieldArgs}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class TextFieldArgsTest { + + @Test + void testDefaultTextFieldArgs() { + TextFieldArgs field = TextFieldArgs. builder().name("title").build(); + + assertThat(field.getName()).isEqualTo("title"); + assertThat(field.getFieldType()).isEqualTo("TEXT"); + assertThat(field.getWeight()).isEmpty(); + assertThat(field.isNoStem()).isFalse(); + assertThat(field.getPhonetic()).isEmpty(); + assertThat(field.isWithSuffixTrie()).isFalse(); + } + + @Test + void testTextFieldArgsWithWeight() { + TextFieldArgs field = TextFieldArgs. builder().name("title").weight(2L).build(); + + assertThat(field.getWeight()).hasValue(2L); + } + + @Test + void testTextFieldArgsWithNoStem() { + TextFieldArgs field = TextFieldArgs. builder().name("title").noStem().build(); + + assertThat(field.isNoStem()).isTrue(); + } + + @Test + void testTextFieldArgsWithPhonetic() { + TextFieldArgs field = TextFieldArgs. builder().name("title") + .phonetic(TextFieldArgs.PhoneticMatcher.ENGLISH).build(); + + assertThat(field.getPhonetic()).hasValue(TextFieldArgs.PhoneticMatcher.ENGLISH); + } + + @Test + void testTextFieldArgsWithSuffixTrie() { + TextFieldArgs field = TextFieldArgs. builder().name("title").withSuffixTrie().build(); + + assertThat(field.isWithSuffixTrie()).isTrue(); + } + + @Test + void testTextFieldArgsWithAllOptions() { + TextFieldArgs field = TextFieldArgs. builder().name("content").as("text_content").weight(2L).noStem() + .phonetic(TextFieldArgs.PhoneticMatcher.FRENCH).withSuffixTrie().sortable().build(); + + assertThat(field.getName()).isEqualTo("content"); + assertThat(field.getAs()).hasValue("text_content"); + assertThat(field.getWeight()).hasValue(2L); + assertThat(field.isNoStem()).isTrue(); + assertThat(field.getPhonetic()).hasValue(TextFieldArgs.PhoneticMatcher.FRENCH); + assertThat(field.isWithSuffixTrie()).isTrue(); + assertThat(field.isSortable()).isTrue(); + } + + @Test + void testPhoneticMatcherValues() { + assertThat(TextFieldArgs.PhoneticMatcher.ENGLISH.getMatcher()).isEqualTo("dm:en"); + assertThat(TextFieldArgs.PhoneticMatcher.FRENCH.getMatcher()).isEqualTo("dm:fr"); + assertThat(TextFieldArgs.PhoneticMatcher.PORTUGUESE.getMatcher()).isEqualTo("dm:pt"); + assertThat(TextFieldArgs.PhoneticMatcher.SPANISH.getMatcher()).isEqualTo("dm:es"); + } + + @Test + void testTextFieldArgsBuild() { + TextFieldArgs field = TextFieldArgs. builder().name("description").as("desc").weight(3L).noStem() + .phonetic(TextFieldArgs.PhoneticMatcher.SPANISH).withSuffixTrie().sortable().unNormalizedForm().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("description"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("desc"); + assertThat(argsString).contains("TEXT"); + assertThat(argsString).contains("WEIGHT"); + assertThat(argsString).contains("3"); + assertThat(argsString).contains("NOSTEM"); + assertThat(argsString).contains("PHONETIC"); + assertThat(argsString).contains("dm:es"); + assertThat(argsString).contains("WITHSUFFIXTRIE"); + assertThat(argsString).contains("SORTABLE"); + assertThat(argsString).contains("UNF"); + } + + @Test + void testTextFieldArgsMinimalBuild() { + TextFieldArgs field = TextFieldArgs. builder().name("simple_text").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("simple_text"); + assertThat(argsString).contains("TEXT"); + assertThat(argsString).doesNotContain("WEIGHT"); + assertThat(argsString).doesNotContain("NOSTEM"); + assertThat(argsString).doesNotContain("PHONETIC"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + assertThat(argsString).doesNotContain("SORTABLE"); + } + + @Test + void testTextFieldArgsWithWeightOnly() { + TextFieldArgs field = TextFieldArgs. builder().name("weighted_field").weight(1L).build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("WEIGHT"); + assertThat(argsString).contains("1"); + assertThat(argsString).doesNotContain("NOSTEM"); + assertThat(argsString).doesNotContain("PHONETIC"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + } + + @Test + void testTextFieldArgsWithPhoneticOnly() { + TextFieldArgs field = TextFieldArgs. builder().name("phonetic_field") + .phonetic(TextFieldArgs.PhoneticMatcher.PORTUGUESE).build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("PHONETIC"); + assertThat(argsString).contains("dm:pt"); + assertThat(argsString).doesNotContain("WEIGHT"); + assertThat(argsString).doesNotContain("NOSTEM"); + assertThat(argsString).doesNotContain("WITHSUFFIXTRIE"); + } + + @Test + void testBuilderMethodChaining() { + // Test that builder methods return the correct type for method chaining + TextFieldArgs field = TextFieldArgs. builder().name("chained_field").weight(2L).noStem() + .phonetic(TextFieldArgs.PhoneticMatcher.ENGLISH).withSuffixTrie().sortable().as("alias").build(); + + assertThat(field.getName()).isEqualTo("chained_field"); + assertThat(field.getAs()).hasValue("alias"); + assertThat(field.getWeight()).hasValue(2L); + assertThat(field.isNoStem()).isTrue(); + assertThat(field.getPhonetic()).hasValue(TextFieldArgs.PhoneticMatcher.ENGLISH); + assertThat(field.isWithSuffixTrie()).isTrue(); + assertThat(field.isSortable()).isTrue(); + } + + @Test + void testTextFieldArgsInheritedMethods() { + // Test that inherited methods from FieldArgs work correctly + TextFieldArgs field = TextFieldArgs. builder().name("inherited_field").noIndex().indexEmpty() + .indexMissing().build(); + + assertThat(field.isNoIndex()).isTrue(); + assertThat(field.isIndexEmpty()).isTrue(); + assertThat(field.isIndexMissing()).isTrue(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("NOINDEX"); + assertThat(argsString).contains("INDEXEMPTY"); + assertThat(argsString).contains("INDEXMISSING"); + } + +} diff --git a/src/test/java/io/lettuce/core/search/arguments/VectorFieldArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/VectorFieldArgsTest.java new file mode 100644 index 000000000..f94487460 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/arguments/VectorFieldArgsTest.java @@ -0,0 +1,253 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ + +package io.lettuce.core.search.arguments; + +import static io.lettuce.TestTags.UNIT_TEST; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.protocol.CommandArgs; + +/** + * Unit tests for {@link VectorFieldArgs}. + * + * @author Tihomir Mateev + */ +@Tag(UNIT_TEST) +class VectorFieldArgsTest { + + @Test + void testDefaultVectorFieldArgs() { + VectorFieldArgs field = VectorFieldArgs. builder().name("embedding").build(); + + assertThat(field.getName()).isEqualTo("embedding"); + assertThat(field.getFieldType()).isEqualTo("VECTOR"); + assertThat(field.getAlgorithm()).isEmpty(); + assertThat(field.getAttributes()).isEmpty(); + assertThat(field.getAs()).isEmpty(); + assertThat(field.isSortable()).isFalse(); + assertThat(field.isUnNormalizedForm()).isFalse(); + assertThat(field.isNoIndex()).isFalse(); + assertThat(field.isIndexEmpty()).isFalse(); + assertThat(field.isIndexMissing()).isFalse(); + } + + @Test + void testVectorFieldArgsWithFlat() { + VectorFieldArgs field = VectorFieldArgs. builder().name("vector").flat().build(); + + assertThat(field.getName()).isEqualTo("vector"); + assertThat(field.getAlgorithm()).hasValue(VectorFieldArgs.Algorithm.FLAT); + } + + @Test + void testVectorFieldArgsWithHnsw() { + VectorFieldArgs field = VectorFieldArgs. builder().name("vector").hnsw().build(); + + assertThat(field.getName()).isEqualTo("vector"); + assertThat(field.getAlgorithm()).hasValue(VectorFieldArgs.Algorithm.HNSW); + } + + @Test + void testVectorFieldArgsWithType() { + VectorFieldArgs field = VectorFieldArgs. builder().name("vector") + .type(VectorFieldArgs.VectorType.FLOAT32).build(); + + assertThat(field.getAttributes()).containsEntry("TYPE", "FLOAT32"); + } + + @Test + void testVectorFieldArgsWithDimensions() { + VectorFieldArgs field = VectorFieldArgs. builder().name("vector").dimensions(128).build(); + + assertThat(field.getAttributes()).containsEntry("DIM", 128); + } + + @Test + void testVectorFieldArgsWithDistanceMetric() { + VectorFieldArgs field = VectorFieldArgs. builder().name("vector") + .distanceMetric(VectorFieldArgs.DistanceMetric.COSINE).build(); + + assertThat(field.getAttributes()).containsEntry("DISTANCE_METRIC", "COSINE"); + } + + @Test + void testVectorFieldArgsWithCustomAttribute() { + VectorFieldArgs field = VectorFieldArgs. builder().name("vector").attribute("INITIAL_CAP", 1000) + .build(); + + assertThat(field.getAttributes()).containsEntry("INITIAL_CAP", 1000); + } + + @Test + void testVectorFieldArgsWithMultipleAttributes() { + VectorFieldArgs field = VectorFieldArgs. builder().name("vector").attribute("BLOCK_SIZE", 512) + .attribute("M", 16).attribute("EF_CONSTRUCTION", 200).build(); + + assertThat(field.getAttributes()).containsEntry("BLOCK_SIZE", 512); + assertThat(field.getAttributes()).containsEntry("M", 16); + assertThat(field.getAttributes()).containsEntry("EF_CONSTRUCTION", 200); + } + + @Test + void testVectorFieldArgsWithAllFlatOptions() { + VectorFieldArgs field = VectorFieldArgs. builder().name("flat_vector").as("vector").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(256).distanceMetric(VectorFieldArgs.DistanceMetric.L2) + .attribute("INITIAL_CAP", 2000).attribute("BLOCK_SIZE", 1024).sortable().build(); + + assertThat(field.getName()).isEqualTo("flat_vector"); + assertThat(field.getAs()).hasValue("vector"); + assertThat(field.getAlgorithm()).hasValue(VectorFieldArgs.Algorithm.FLAT); + assertThat(field.getAttributes()).containsEntry("TYPE", "FLOAT32"); + assertThat(field.getAttributes()).containsEntry("DIM", 256); + assertThat(field.getAttributes()).containsEntry("DISTANCE_METRIC", "L2"); + assertThat(field.getAttributes()).containsEntry("INITIAL_CAP", 2000); + assertThat(field.getAttributes()).containsEntry("BLOCK_SIZE", 1024); + assertThat(field.isSortable()).isTrue(); + } + + @Test + void testVectorFieldArgsWithAllHnswOptions() { + VectorFieldArgs field = VectorFieldArgs. builder().name("hnsw_vector").as("vector").hnsw() + .type(VectorFieldArgs.VectorType.FLOAT64).dimensions(512).distanceMetric(VectorFieldArgs.DistanceMetric.IP) + .attribute("INITIAL_CAP", 5000).attribute("M", 32).attribute("EF_CONSTRUCTION", 400).attribute("EF_RUNTIME", 20) + .attribute("EPSILON", 0.005).sortable().build(); + + assertThat(field.getName()).isEqualTo("hnsw_vector"); + assertThat(field.getAs()).hasValue("vector"); + assertThat(field.getAlgorithm()).hasValue(VectorFieldArgs.Algorithm.HNSW); + assertThat(field.getAttributes()).containsEntry("TYPE", "FLOAT64"); + assertThat(field.getAttributes()).containsEntry("DIM", 512); + assertThat(field.getAttributes()).containsEntry("DISTANCE_METRIC", "IP"); + assertThat(field.getAttributes()).containsEntry("INITIAL_CAP", 5000); + assertThat(field.getAttributes()).containsEntry("M", 32); + assertThat(field.getAttributes()).containsEntry("EF_CONSTRUCTION", 400); + assertThat(field.getAttributes()).containsEntry("EF_RUNTIME", 20); + assertThat(field.getAttributes()).containsEntry("EPSILON", 0.005); + assertThat(field.isSortable()).isTrue(); + } + + @Test + void testVectorTypeEnum() { + assertThat(VectorFieldArgs.VectorType.FLOAT32.name()).isEqualTo("FLOAT32"); + assertThat(VectorFieldArgs.VectorType.FLOAT64.name()).isEqualTo("FLOAT64"); + } + + @Test + void testDistanceMetricEnum() { + assertThat(VectorFieldArgs.DistanceMetric.L2.name()).isEqualTo("L2"); + assertThat(VectorFieldArgs.DistanceMetric.IP.name()).isEqualTo("IP"); + assertThat(VectorFieldArgs.DistanceMetric.COSINE.name()).isEqualTo("COSINE"); + } + + @Test + void testAlgorithmEnum() { + assertThat(VectorFieldArgs.Algorithm.FLAT.name()).isEqualTo("FLAT"); + assertThat(VectorFieldArgs.Algorithm.HNSW.name()).isEqualTo("HNSW"); + } + + @Test + void testVectorFieldArgsBuildFlat() { + VectorFieldArgs field = VectorFieldArgs. builder().name("test_vector").as("vector").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(128).distanceMetric(VectorFieldArgs.DistanceMetric.COSINE) + .attribute("INITIAL_CAP", 1000).attribute("BLOCK_SIZE", 512).sortable().build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("test_vector"); + assertThat(argsString).contains("AS"); + assertThat(argsString).contains("vector"); + assertThat(argsString).contains("VECTOR"); + assertThat(argsString).contains("FLAT"); + assertThat(argsString).contains("TYPE"); + assertThat(argsString).contains("FLOAT32"); + assertThat(argsString).contains("DIM"); + assertThat(argsString).contains("128"); + assertThat(argsString).contains("DISTANCE_METRIC"); + assertThat(argsString).contains("COSINE"); + assertThat(argsString).contains("INITIAL_CAP"); + assertThat(argsString).contains("1000"); + assertThat(argsString).contains("BLOCK_SIZE"); + assertThat(argsString).contains("512"); + assertThat(argsString).contains("SORTABLE"); + } + + @Test + void testVectorFieldArgsBuildHnsw() { + VectorFieldArgs field = VectorFieldArgs. builder().name("hnsw_test").hnsw() + .type(VectorFieldArgs.VectorType.FLOAT64).dimensions(256).distanceMetric(VectorFieldArgs.DistanceMetric.L2) + .attribute("M", 16).attribute("EF_CONSTRUCTION", 200).attribute("EF_RUNTIME", 10).attribute("EPSILON", 0.01) + .build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("hnsw_test"); + assertThat(argsString).contains("VECTOR"); + assertThat(argsString).contains("HNSW"); + assertThat(argsString).contains("TYPE"); + assertThat(argsString).contains("FLOAT64"); + assertThat(argsString).contains("DIM"); + assertThat(argsString).contains("256"); + assertThat(argsString).contains("DISTANCE_METRIC"); + assertThat(argsString).contains("L2"); + assertThat(argsString).contains("M"); + assertThat(argsString).contains("16"); + assertThat(argsString).contains("EF_CONSTRUCTION"); + assertThat(argsString).contains("200"); + assertThat(argsString).contains("EF_RUNTIME"); + assertThat(argsString).contains("10"); + assertThat(argsString).contains("EPSILON"); + assertThat(argsString).contains("0.01"); + } + + @Test + void testVectorFieldArgsMinimalBuild() { + VectorFieldArgs field = VectorFieldArgs. builder().name("simple_vector").build(); + + CommandArgs commandArgs = new CommandArgs<>(StringCodec.UTF8); + field.build(commandArgs); + + String argsString = commandArgs.toString(); + assertThat(argsString).contains("simple_vector"); + assertThat(argsString).contains("VECTOR"); + assertThat(argsString).doesNotContain("AS"); + assertThat(argsString).doesNotContain("FLAT"); + assertThat(argsString).doesNotContain("HNSW"); + assertThat(argsString).doesNotContain("TYPE"); + assertThat(argsString).doesNotContain("DIM"); + assertThat(argsString).doesNotContain("DISTANCE_METRIC"); + assertThat(argsString).doesNotContain("SORTABLE"); + } + + @Test + void testBuilderMethodChaining() { + // Test that builder methods return the correct type for method chaining + VectorFieldArgs field = VectorFieldArgs. builder().name("chained_vector").as("chained_alias").flat() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(64).distanceMetric(VectorFieldArgs.DistanceMetric.IP) + .attribute("INITIAL_CAP", 500).attribute("BLOCK_SIZE", 256).sortable().noIndex().build(); + + assertThat(field.getName()).isEqualTo("chained_vector"); + assertThat(field.getAs()).hasValue("chained_alias"); + assertThat(field.getAlgorithm()).hasValue(VectorFieldArgs.Algorithm.FLAT); + assertThat(field.getAttributes()).containsEntry("TYPE", "FLOAT32"); + assertThat(field.getAttributes()).containsEntry("DIM", 64); + assertThat(field.getAttributes()).containsEntry("DISTANCE_METRIC", "IP"); + assertThat(field.getAttributes()).containsEntry("INITIAL_CAP", 500); + assertThat(field.getAttributes()).containsEntry("BLOCK_SIZE", 256); + assertThat(field.isSortable()).isTrue(); + assertThat(field.isNoIndex()).isTrue(); + } + +}