diff --git a/.github/wordlist.txt b/.github/wordlist.txt index 3b2d8e7e79..0883d97393 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -321,4 +321,11 @@ jcl slf testcontainers Readme -DefaultAzureCredential \ No newline at end of file +DefaultAzureCredential +geospatial +Geospatial +RediSearch +embeddings +Dimensionality +HNSW +VectorSet \ No newline at end of file diff --git a/docs/new-features.md b/docs/new-features.md index c67cd1089d..0eec2f8230 100644 --- a/docs/new-features.md +++ b/docs/new-features.md @@ -1,5 +1,19 @@ # New & Noteworthy + +## What’s new in Lettuce 6.8 +- [RediSearch support](user-guide/redis-search.md) through `RediSearchCommands` and the respective reactive, async and Kotlin APIs + +## What’s new in Lettuce 6.7 +- [VectorSet support](user-guide/vector-sets.md) through `RedisVectorSetCommands` and the respective reactive, async and Kotlin APIs +- `ConnectionPoolSupport` also allows the user to provide custom connection validations + +## What’s new in Lettuce 6.6 +- Support `HGETDEL`, `HGETEX` and `HSETEX` +- Introduce command replay filter to avoid command replaying after reconnect +- Deprecate the STRALGO command and implement the LCS in its place +- Token based authentication integration with core extension + ## What’s new in Lettuce 6.5 - [RedisJSON support](user-guide/redis-json.md) through `RedisJSONCommands` and the respective reactive, async and Kotlin APIs diff --git a/docs/user-guide/redis-search.md b/docs/user-guide/redis-search.md new file mode 100644 index 0000000000..47d2be1be0 --- /dev/null +++ b/docs/user-guide/redis-search.md @@ -0,0 +1,847 @@ +# Redis Search support in Lettuce + +Lettuce supports [Redis Search](https://redis.io/docs/latest/develop/ai/search-and-query/) starting from [Lettuce 6.8.0.RELEASE](https://github.com/redis/lettuce/releases/tag/6.8.0.RELEASE). + +Redis Search provides a rich query engine that enables full-text search, vector search, geospatial queries, and aggregations on Redis data. It transforms Redis into a powerful document database, vector database, secondary index, and search engine. + +!!! INFO + Redis Search is available in Redis Open Source version 8.0, Redis Enterprise, and Redis Cloud. For older versions of Redis Open Source the functionality requires the RediSearch module to be loaded. + +!!! WARNING + Redis Search commands are marked as `@Experimental` in Lettuce 6.8 and may undergo API changes in future releases. The underlying Redis Search functionality is stable and production-ready. + +## Core Concepts + +Redis Search operates on **indexes** that define how your data should be searchable. 
An index specifies: + +- **Data source**: Which Redis keys to index (HASH or JSON documents) +- **Field definitions**: Which fields are searchable and their types (TEXT, NUMERIC, TAG, GEO, VECTOR) +- **Search capabilities**: Full-text search, exact matching, range queries, vector similarity + +## Getting Started + +### Basic Setup + +```java +RedisURI redisURI = RedisURI.Builder.redis("localhost").withPort(6379).build(); +RedisClient redisClient = RedisClient.create(redisURI); +StatefulRedisConnection connection = redisClient.connect(); +RediSearchCommands search = connection.sync(); +``` + +### Creating Your First Index + +```java +// Define searchable fields +List> fields = Arrays.asList( + TextFieldArgs.builder().name("title").build(), + TextFieldArgs.builder().name("content").build(), + NumericFieldArgs.builder().name("price").sortable().build(), + TagFieldArgs.builder().name("category").sortable().build() +); + +// Create the index +String result = search.ftCreate("products-idx", fields); +// Returns: "OK" +``` + +### Adding Data + +```java +// Add documents as Redis hashes +Map product1 = Map.of( + "title", "Wireless Headphones", + "content", "High-quality wireless headphones with noise cancellation", + "price", "199.99", + "category", "electronics" +); +redis.hmset("product:1", product1); + +Map product2 = Map.of( + "title", "Running Shoes", + "content", "Comfortable running shoes for daily exercise", + "price", "89.99", + "category", "sports" +); +redis.hmset("product:2", product2); +``` + +### Basic Search + +```java +// Simple text search +SearchReply results = search.ftSearch("products-idx", "wireless"); + +// Access results +System.out.println("Found " + results.getCount() + " documents"); +for (SearchReply.SearchResult result : results.getResults()) { + System.out.println("Key: " + result.getKey()); + System.out.println("Title: " + result.getFields().get("title")); +} +``` + +## Field Types and Indexing + +### Text Fields +Full-text searchable fields with stemming, phonetic matching, and scoring. + +```java +TextFieldArgs titleField = TextFieldArgs.builder() + .name("title") + .weight(2.0) // Boost importance in scoring + .sortable() // Enable sorting + .noStem() // Disable stemming + .phonetic(TextFieldArgs.PhoneticMatcher.ENGLISH) // Enable phonetic matching + .build(); +``` + +### Numeric Fields +For range queries and sorting on numeric values. + +```java +NumericFieldArgs priceField = NumericFieldArgs.builder() + .name("price") + .sortable() // Enable sorting + .noIndex() // Don't index for search, only for sorting + .build(); +``` + +### Tag Fields +For exact matching and faceted search. + +```java +TagFieldArgs categoryField = TagFieldArgs.builder() + .name("category") + .separator(",") // Custom separator for multiple tags + .sortable() + .build(); +``` + +### Geospatial Fields +For location-based queries. + +```java +GeoFieldArgs locationField = GeoFieldArgs.builder() + .name("location") + .build(); +``` + +### Vector Fields +For semantic search and similarity matching. 
+ +```java +VectorFieldArgs embeddingField = VectorFieldArgs.builder() + .name("embedding") + .algorithm(VectorAlgorithm.FLAT) + .type(VectorType.FLOAT32) + .dimension(768) + .distanceMetric(DistanceMetric.COSINE) + .build(); +``` + +## Advanced Index Configuration + +### Index with Custom Settings + +```java +CreateArgs createArgs = CreateArgs.builder() + .on(IndexDataType.HASH) // Index HASH documents + .withPrefix("product:") // Only index keys with this prefix + .language("english") // Default language for text processing + .languageField("lang") // Field containing document language + .score(0.5) // Default document score + .scoreField("popularity") // Field containing document score + .maxTextFields() // Allow unlimited text fields + .temporary(3600) // Auto-expire index after 1 hour + .noOffsets() // Disable term offset storage + .noHighlighting() // Disable highlighting + .noFields() // Don't store field contents + .noFreqs() // Don't store term frequencies + .stopwords("the", "a", "an") // Custom stopwords + .build(); + +String result = search.ftCreate("advanced-idx", createArgs, fields); +``` + +### JSON Document Indexing + +```java +CreateArgs jsonArgs = CreateArgs.builder() + .on(IndexDataType.JSON) + .prefix("user:") + .build(); + +List> jsonFields = Arrays.asList( + TextFieldArgs.builder().name("$.name").as("name").build(), + NumericFieldArgs.builder().name("$.age").as("age").build(), + TagFieldArgs.builder().name("$.tags[*]").as("tags").build() +); + +search.ftCreate("users-idx", jsonArgs, jsonFields); +``` + +## Search Queries + +### Query Syntax + +Redis Search supports a rich query language: + +```java +// Simple term search +search.ftSearch("products-idx", "wireless"); + +// Phrase search +search.ftSearch("products-idx", "\"noise cancellation\""); + +// Boolean operators +search.ftSearch("products-idx", "wireless AND headphones"); +search.ftSearch("products-idx", "headphones OR earbuds"); +search.ftSearch("products-idx", "audio -speakers"); + +// Field-specific search +search.ftSearch("products-idx", "@title:wireless @category:electronics"); + +// Wildcard and fuzzy search +search.ftSearch("products-idx", "wireles*"); // Prefix matching +search.ftSearch("products-idx", "%wireles%"); // Fuzzy matching + +// Numeric range queries +search.ftSearch("products-idx", "@price:[100 200]"); // Inclusive range +search.ftSearch("products-idx", "@price:[(100 (200]"); // Exclusive bounds +search.ftSearch("products-idx", "@price:[100 +inf]"); // Open range +``` + +### Advanced Search Options + +```java +SearchArgs searchArgs = SearchArgs.builder() + .limit(0, 10) // Pagination: offset 0, limit 10 + .sortBy("price", SortDirection.ASC) // Sort by price ascending + .returnFields("title", "price") // Only return specific fields + .highlightFields("title", "content") // Highlight specific fields + .highlightTags("", "") // Custom highlight tags + .summarizeFields("content") // Summarize specific fields + .summarizeFrags(3) // Number of summary fragments + .summarizeLen(50) // Summary length + .scorer(ScoringFunction.TF_IDF) // Scoring algorithm + .explainScore() // Include score explanation + .withScores() // Include document scores + .noContent() // Don't return document content + .verbatim() // Don't use stemming + .noStopwords() // Don't filter stopwords + .withSortKeys() // Include sort key values + .inKeys("product:1", "product:2") // Search only specific keys + .inFields("title", "content") // Search only specific fields + .slop(2) // Allow term reordering + .timeout(5000) // Query 
timeout in milliseconds + .params("category", "electronics") // Query parameters + .dialect(QueryDialects.DIALECT_2) // Query dialect version + .build(); + +SearchReply results = search.ftSearch("products-idx", "@title:$category", searchArgs); +``` + +## Vector Search + +Vector search enables semantic similarity matching using machine learning embeddings. + +### Creating a Vector Index + +```java +List> vectorFields = Arrays.asList( + TextFieldArgs.builder().name("title").build(), + VectorFieldArgs.builder() + .name("embedding") + .algorithm(VectorAlgorithm.FLAT) // or VectorAlgorithm.HNSW + .type(VectorType.FLOAT32) + .dimension(768) // Vector dimension + .distanceMetric(DistanceMetric.COSINE) // COSINE, L2, or IP + .initialCapacity(1000) // Initial vector capacity + .build() +); + +search.ftCreate("semantic-idx", vectorFields); +``` + +### Adding Vector Data + +```java +// Convert text to embeddings (using your ML model) +float[] embedding = textToEmbedding("wireless headphones"); +String embeddingStr = Arrays.toString(embedding); + +Map doc = Map.of( + "title", "Wireless Headphones", + "embedding", embeddingStr +); +redis.hmset("doc:1", doc); +``` + +### Vector Similarity Search + +```java +// Find similar documents using vector search +float[] queryVector = textToEmbedding("bluetooth audio device"); +String vectorQuery = "*=>[KNN 10 @embedding $query_vec AS score]"; + +SearchArgs vectorArgs = SearchArgs.builder() + .params("query_vec", Arrays.toString(queryVector)) + .sortBy("score", SortDirection.ASC) + .returnFields("title", "score") + .dialect(QueryDialects.DIALECT_2) + .build(); + +SearchReply results = search.ftSearch("semantic-idx", vectorQuery, vectorArgs); +``` + +## Geospatial Search + +Search for documents based on geographic location. + +### Creating a Geo Index + +```java +List> geoFields = Arrays.asList( + TextFieldArgs.builder().name("name").build(), + GeoFieldArgs.builder().name("location").build() +); + +search.ftCreate("places-idx", geoFields); +``` + +### Adding Geo Data + +```java +Map place = Map.of( + "name", "Central Park", + "location", "40.7829,-73.9654" // lat,lon format +); +redis.hmset("place:1", place); +``` + +### Geo Queries + +```java +// Find places within radius +SearchArgs geoArgs = SearchArgs.builder() + .geoFilter("location", 40.7829, -73.9654, 5, GeoUnit.KM) + .build(); + +SearchReply nearbyPlaces = search.ftSearch("places-idx", "*", geoArgs); + +// Geo query in search string +SearchReply results = search.ftSearch("places-idx", + "@location:[40.7829 -73.9654 5 km]"); +``` + +## Aggregations + +Aggregations provide powerful analytics capabilities for processing search results. 
+ +### Basic Aggregation + +```java +// Simple aggregation without pipeline operations +AggregationReply results = search.ftAggregate("products-idx", "*"); +``` + +### Advanced Aggregation Pipeline + +```java +AggregateArgs aggArgs = AggregateArgs.builder() + // Load specific fields + .load("title").load("price").load("category") + + // Apply transformations + .apply("@price * 0.9", "discounted_price") + + // Filter results + .filter("@price > 50") + + // Group by category with reducers + .groupBy(GroupBy.of("category") + .reduce(Reducer.count().as("product_count")) + .reduce(Reducer.avg("@price").as("avg_price")) + .reduce(Reducer.sum("@price").as("total_value")) + .reduce(Reducer.min("@price").as("min_price")) + .reduce(Reducer.max("@price").as("max_price"))) + + // Sort results + .sortBy("avg_price", SortDirection.DESC) + + // Limit results + .limit(0, 10) + + // Apply final transformations + .apply("@total_value / @product_count", "calculated_avg") + + // Set query parameters + .verbatim() + .timeout(5000) + .params("min_price", "50") + .dialect(QueryDialects.DIALECT_2) + .build(); + +AggregationReply aggResults = search.ftAggregate("products-idx", "*", aggArgs); + +// Process aggregation results +for (SearchReply reply : aggResults.getReplies()) { + for (SearchReply.SearchResult result : reply.getResults()) { + System.out.println("Category: " + result.getFields().get("category")); + System.out.println("Count: " + result.getFields().get("product_count")); + System.out.println("Avg Price: " + result.getFields().get("avg_price")); + } +} +``` + +### Dynamic and Re-entrant Pipelines + +Redis aggregations support dynamic pipelines where operations can be repeated and applied in any order: + +```java +AggregateArgs complexPipeline = AggregateArgs.builder() + // First transformation + .apply("@price * @quantity", "total_value") + + // First filter + .filter("@total_value > 100") + + // First grouping + .groupBy(GroupBy.of("category") + .reduce(Reducer.sum("@total_value").as("category_revenue"))) + + // First sort + .sortBy("category_revenue", SortDirection.DESC) + + // Second transformation + .apply("@category_revenue / 1000", "revenue_k") + + // Second filter + .filter("@revenue_k > 5") + + // Second grouping (re-entrant) + .groupBy(GroupBy.of("revenue_k") + .reduce(Reducer.count().as("high_revenue_categories"))) + + // Second sort (re-entrant) + .sortBy("high_revenue_categories", SortDirection.DESC) + + .build(); +``` + +### Cursor-based Aggregation + +For large result sets, use cursors to process data in batches: + +```java +AggregateArgs cursorArgs = AggregateArgs.builder() + .groupBy(GroupBy.of("category") + .reduce(Reducer.count().as("count"))) + .withCursor() + .withCursor(1000, 300000) // batch size: 1000, timeout: 5 minutes + .build(); + +// Initial aggregation with cursor +AggregationReply firstBatch = search.ftAggregate("products-idx", "*", cursorArgs); +long cursorId = firstBatch.getCursorId(); + +// Read subsequent batches +while (cursorId != 0) { + AggregationReply nextBatch = search.ftCursorread("products-idx", cursorId, 500); + cursorId = nextBatch.getCursorId(); + + // Process batch + processResults(nextBatch); +} + +// Clean up cursor when done +search.ftCursordel("products-idx", cursorId); +``` + +## Index Management + +### Index Information and Statistics + +```java +// Get index information +Map info = search.ftInfo("products-idx"); +System.out.println("Index size: " + info.get("num_docs")); +System.out.println("Index memory: " + info.get("inverted_sz_mb") + " MB"); + 
+// List all indexes +List indexes = search.ftList(); +``` + +### Index Aliases + +```java +// Create an alias for easier index management +search.ftAliasadd("products", "products-idx-v1"); + +// Update alias to point to new index version +search.ftAliasupdate("products", "products-idx-v2"); + +// Remove alias +search.ftAliasdel("products"); +``` + +### Modifying Indexes + +```java +// Add new fields to existing index +List> newFields = Arrays.asList( + TagFieldArgs.builder().name("brand").build(), + NumericFieldArgs.builder().name("rating").build() +); + +search.ftAlter("products-idx", false, newFields); // false = scan existing docs +search.ftAlter("products-idx", true, newFields); // true = skip initial scan +``` + +### Index Cleanup + +```java +// Drop an index (keeps the data) +search.ftDropindex("products-idx"); + +// Drop an index and delete all associated documents +search.ftDropindex("products-idx", true); +``` + +## Auto-completion and Suggestions + +Redis Search provides auto-completion functionality for building search-as-you-type features. + +### Creating Suggestions + +```java +// Add suggestions to a dictionary +search.ftSugadd("autocomplete", "wireless headphones", 1.0); +search.ftSugadd("autocomplete", "bluetooth speakers", 0.8); +search.ftSugadd("autocomplete", "noise cancelling earbuds", 0.9); + +// Add with additional options +SugAddArgs sugArgs = SugAddArgs.builder() + .increment() // Increment score if suggestion exists + .payload("category:electronics") // Additional metadata + .build(); + +search.ftSugadd("autocomplete", "gaming headset", 0.7, sugArgs); +``` + +### Getting Suggestions + +```java +// Basic suggestion retrieval +List> suggestions = search.ftSugget("autocomplete", "head"); + +// Advanced suggestion options +SugGetArgs getArgs = SugGetArgs.builder() + .fuzzy() // Enable fuzzy matching + .max(5) // Limit to 5 suggestions + .withScores() // Include scores + .withPayloads() // Include payloads + .build(); + +List> results = search.ftSugget("autocomplete", "head", getArgs); + +for (Suggestion suggestion : results) { + System.out.println("Suggestion: " + suggestion.getValue()); + System.out.println("Score: " + suggestion.getScore()); + System.out.println("Payload: " + suggestion.getPayload()); +} +``` + +### Managing Suggestions + +```java +// Get suggestion dictionary size +Long count = search.ftSuglen("autocomplete"); + +// Delete a suggestion +Boolean deleted = search.ftSugdel("autocomplete", "old suggestion"); +``` + +## Spell Checking + +Redis Search can suggest corrections for misspelled queries. + +```java +// Basic spell check +List> corrections = search.ftSpellcheck("products-idx", "wireles hedphones"); + +// Advanced spell check with options +SpellCheckArgs spellArgs = SpellCheckArgs.builder() + .distance(2) // Maximum Levenshtein distance + .terms("include", "dictionary") // Include terms from dictionary + .terms("exclude", "stopwords") // Exclude stopwords + .dialect(QueryDialects.DIALECT_2) + .build(); + +List> results = search.ftSpellcheck("products-idx", "wireles hedphones", spellArgs); + +for (SpellCheckResult result : results) { + System.out.println("Original: " + result.getTerm()); + for (SpellCheckResult.Suggestion suggestion : result.getSuggestions()) { + System.out.println(" Suggestion: " + suggestion.getValue() + " (score: " + suggestion.getScore() + ")"); + } +} +``` + +## Dictionary Management + +Manage custom dictionaries for spell checking and synonyms. 
+ +```java +// Add terms to dictionary +search.ftDictadd("custom_dict", "smartphone", "tablet", "laptop"); + +// Remove terms from dictionary +search.ftDictdel("custom_dict", "outdated_term"); + +// Get all terms in dictionary +List terms = search.ftDictdump("custom_dict"); +``` + +## Synonym Management + +Create synonym groups for query expansion. + +```java +// Create synonym group +search.ftSynupdate("products-idx", "group1", "phone", "smartphone", "mobile"); + +// Update synonym group (replaces existing) +SynUpdateArgs synArgs = SynUpdateArgs.builder() + .skipInitialScan() // Don't reindex existing documents + .build(); + +search.ftSynupdate("products-idx", "group1", synArgs, "phone", "smartphone", "mobile", "cellphone"); + +// Get synonym groups +Map> synonyms = search.ftSyndump("products-idx"); +``` + +## Query Profiling and Debugging + +### Query Explanation + +Understand how Redis Search executes your queries: + +```java +// Basic query explanation +String plan = search.ftExplain("products-idx", "@title:wireless"); + +// Detailed explanation with dialect +ExplainArgs explainArgs = ExplainArgs.builder() + .dialect(QueryDialects.DIALECT_2) + .build(); + +String detailedPlan = search.ftExplain("products-idx", "@title:wireless", explainArgs); +System.out.println("Execution plan: " + detailedPlan); +``` + +## Advanced Usage Patterns + +### Multi-Index Search + +Search across multiple indexes for federated queries: + +```java +// Create specialized indexes +search.ftCreate("products-idx", productFields); +search.ftCreate("reviews-idx", reviewFields); + +// Search each index separately and combine results +SearchReply productResults = search.ftSearch("products-idx", "wireless"); +SearchReply reviewResults = search.ftSearch("reviews-idx", "wireless"); + +// Combine and process results as needed +``` + +### Index Versioning and Blue-Green Deployment + +```java +// Create new index version +search.ftCreate("products-idx-v2", newFields); + +// Populate new index with updated data +// ... data migration logic ... + +// Switch alias to new index +search.ftAliasupdate("products", "products-idx-v2"); + +// Clean up old index after verification +search.ftDropindex("products-idx-v1"); +``` + +### Conditional Indexing + +```java +// Index only documents matching certain criteria +CreateArgs conditionalArgs = CreateArgs.builder() + .on(IndexDataType.HASH) + .prefix("product:") + .filter("@status=='active'") // Only index active products + .build(); + +search.ftCreate("active-products-idx", conditionalArgs, fields); +``` + +## Performance Optimization + +### Index Design Best Practices + +1. **Field Selection**: Only index fields you actually search on +2. **Text Field Optimization**: Use `NOOFFSETS`, `NOHL`, `NOFREQS` for memory savings +3. **Numeric Fields**: Use `NOINDEX` for sort-only fields +4. 
**Vector Fields**: Choose appropriate algorithm (FLAT vs HNSW) based on use case + +```java +// Memory-optimized text field +TextFieldArgs optimizedField = TextFieldArgs.builder() + .name("description") + .noOffsets() // Disable position tracking + .noHL() // Disable highlighting + .noFreqs() // Disable frequency tracking + .build(); + +// Sort-only numeric field +NumericFieldArgs sortField = NumericFieldArgs.builder() + .name("timestamp") + .sortable() + .noIndex() // Don't index for search + .build(); +``` + +### Query Optimization + +```java +// Use specific field searches instead of global search +search.ftSearch("idx", "@title:wireless"); // Better than "wireless" + +// Use numeric ranges for better performance +search.ftSearch("idx", "@price:[100 200]"); // Better than "@price:>=100 @price:<=200" + +// Limit result sets appropriately +SearchArgs limitedArgs = SearchArgs.builder() + .limit(0, 20) // Don't fetch more than needed + .noContent() // Skip content if only metadata needed + .build(); +``` + +## Error Handling and Troubleshooting + +### Common Error Scenarios + +```java +try { + search.ftCreate("existing-idx", fields); +} catch (RedisCommandExecutionException e) { + if (e.getMessage().contains("Index already exists")) { + // Handle index already exists + System.out.println("Index already exists, skipping creation"); + } else { + throw e; + } +} + +try { + SearchReply results = search.ftSearch("idx", "invalid:query["); +} catch (RedisCommandExecutionException e) { + if (e.getMessage().contains("Syntax error")) { + // Handle query syntax error + System.out.println("Invalid query syntax: " + e.getMessage()); + } +} +``` + +### Index Health Monitoring + +```java +// Monitor index statistics +Map info = search.ftInfo("products-idx"); +long numDocs = (Long) info.get("num_docs"); +double memoryMB = (Double) info.get("inverted_sz_mb"); + +if (memoryMB > 1000) { // Alert if index uses > 1GB + System.out.println("Warning: Index memory usage is high: " + memoryMB + " MB"); +} + +// Check for indexing errors +List errors = (List) info.get("hash_indexing_failures"); +if (!errors.isEmpty()) { + System.out.println("Indexing errors detected: " + errors); +} +``` + +## Integration Examples + +### Spring Boot Integration + +```java +@Configuration +public class RedisSearchConfig { + + @Bean + public RedisClient redisClient() { + return RedisClient.create("redis://localhost:6379"); + } + + @Bean + public RediSearchCommands rediSearchCommands(RedisClient client) { + return client.connect().sync(); + } +} + +@Service +public class ProductSearchService { + + @Autowired + private RediSearchCommands search; + + public List searchProducts(String query, int page, int size) { + SearchArgs args = SearchArgs.builder() + .limit(page * size, size) + .build(); + + SearchReply results = search.ftSearch("products-idx", query, args); + return convertToProducts(results); + } +} +``` + +### Reactive Programming + +```java +// Using reactive commands +StatefulRedisConnection connection = redisClient.connect(); +RediSearchReactiveCommands reactiveSearch = connection.reactive(); + +Mono> searchMono = reactiveSearch.ftSearch("products-idx", "wireless"); + +searchMono.subscribe(results -> { + System.out.println("Found " + results.getCount() + " results"); + results.getResults().forEach(result -> + System.out.println("Product: " + result.getFields().get("title")) + ); +}); +``` + +## Migration and Compatibility + +### Upgrading from RediSearch 1.x + +When migrating from older RediSearch versions: + +1. 
**Query Dialect**: Use `DIALECT 2` for new features +2. **Vector Fields**: Available in RediSearch 2.4+ +3. **JSON Support**: Requires RedisJSON module for versions of Redis before 8.0 +4. **Aggregation Cursors**: Available in RediSearch 2.0+ + +```java +// Ensure compatibility with modern features +SearchArgs modernArgs = SearchArgs.builder() + .dialect(QueryDialects.DIALECT_2) // Use latest dialect + .build(); +``` \ No newline at end of file diff --git a/docs/user-guide/vector-sets.md b/docs/user-guide/vector-sets.md new file mode 100644 index 0000000000..eb5ab63286 --- /dev/null +++ b/docs/user-guide/vector-sets.md @@ -0,0 +1,764 @@ +# Redis Vector Sets support in Lettuce + +Lettuce supports [Redis Vector Sets](https://redis.io/docs/latest/develop/data-types/vector-sets/) starting from [Lettuce 6.7.0.RELEASE](https://github.com/redis/lettuce/releases/tag/6.7.0.RELEASE). + +Redis Vector Sets are a new data type designed for efficient vector similarity search. Inspired by Redis sorted sets, vector sets store elements with associated high-dimensional vectors instead of scores, enabling fast similarity queries for AI and machine learning applications. + +!!! INFO + Vector Sets are currently in preview and available in Redis 8 Community Edition. The API may undergo changes in future releases based on community feedback. + +!!! WARNING + Vector Sets commands are marked as `@Experimental` in Lettuce 6.7 and may undergo API changes in future releases. The underlying Redis Vector Sets functionality is stable and production-ready. + +## Core Concepts + +Vector Sets extend the concept of Redis sorted sets by: + +- **Vector Storage**: Elements are associated with high-dimensional vectors instead of numeric scores +- **Similarity Search**: Find elements most similar to a query vector or existing element +- **Quantization**: Automatic vector compression to optimize memory usage +- **Filtering**: Associate JSON attributes with elements for filtered similarity search +- **Dimensionality Reduction**: Reduce vector dimensions using random projection + +## Getting Started + +### Basic Setup + +```java +RedisURI redisURI = RedisURI.Builder.redis("localhost").withPort(6379).build(); +RedisClient redisClient = RedisClient.create(redisURI); +StatefulRedisConnection connection = redisClient.connect(); +RedisVectorSetCommands vectorSet = connection.sync(); +``` + +### Creating Your First Vector Set + +```java +// Add vectors to a vector set (creates the set if it doesn't exist) +Boolean result1 = vectorSet.vadd("points", "pt:A", 1.0, 1.0); +Boolean result2 = vectorSet.vadd("points", "pt:B", -1.0, -1.0); +Boolean result3 = vectorSet.vadd("points", "pt:C", -1.0, 1.0); +Boolean result4 = vectorSet.vadd("points", "pt:D", 1.0, -1.0); +Boolean result5 = vectorSet.vadd("points", "pt:E", 1.0, 0.0); + +System.out.println("Added 5 points to vector set"); +``` + +### Basic Vector Set Operations + +```java +// Get the number of elements in the vector set +Long cardinality = vectorSet.vcard("points"); +System.out.println("Vector set contains: " + cardinality + " elements"); + +// Get the dimensionality of vectors in the set +Long dimensions = vectorSet.vdim("points"); +System.out.println("Vector dimensionality: " + dimensions); + +// Check if the key is a vector set +String type = redis.type("points"); +System.out.println("Data type: " + type); // Returns "vectorset" +``` + +## Vector Operations + +### Adding Vectors + +```java +// Basic vector addition +Boolean added = vectorSet.vadd("embeddings", "doc:1", 0.1, 0.2, 0.3, 
0.4); + +// Add vector with specific dimensionality +Boolean addedWithDim = vectorSet.vadd("embeddings", 4, "doc:2", 0.5, 0.6, 0.7, 0.8); + +// Add vector with advanced options +VAddArgs args = VAddArgs.Builder + .quantizationType(QuantizationType.Q8) + .build(); +Boolean addedWithArgs = vectorSet.vadd("embeddings", 4, "doc:3", args, 0.9, 1.0, 1.1, 1.2); +``` + +### Retrieving Vectors + +```java +// Get the approximate vector for an element +List vector = vectorSet.vemb("points", "pt:A"); +System.out.println("Vector for pt:A: " + vector); + +// Get raw vector data (more efficient for large vectors) +RawVector rawVector = vectorSet.vembRaw("points", "pt:A"); +``` + +### Removing Vectors + +```java +// Remove an element from the vector set +Boolean removed = vectorSet.vrem("points", "pt:A"); +System.out.println("Element removed: " + removed); +``` + +## Vector Similarity Search + +### Basic Similarity Search + +```java +// Find elements most similar to a query vector +List similar = vectorSet.vsim("points", 0.9, 0.1); +System.out.println("Most similar elements: " + similar); + +// Find elements similar to an existing element +List similarToElement = vectorSet.vsim("points", "pt:A"); +System.out.println("Elements similar to pt:A: " + similarToElement); +``` + +### Advanced Similarity Search + +```java +// Similarity search with scores and options +VSimArgs simArgs = VSimArgs.Builder + .count(5) // Return top 5 results + .explorationFactor(200) // Search exploration factor + .build(); + +Map resultsWithScores = vectorSet.vsimWithScore("points", simArgs, 0.9, 0.1); +resultsWithScores.forEach((element, score) -> + System.out.println(element + ": " + score)); +``` + +## Element Attributes and Filtering + +### Setting and Getting Attributes + +```java +// Set JSON attributes for an element +String attributes = "{\"category\": \"electronics\", \"price\": 299.99, \"brand\": \"TechCorp\"}"; +Boolean attrSet = vectorSet.vsetattr("products", "item:1", attributes); + +// Get attributes for an element +String retrievedAttrs = vectorSet.vgetattr("products", "item:1"); +System.out.println("Attributes: " + retrievedAttrs); + +// Clear all attributes for an element +Boolean cleared = vectorSet.vClearAttributes("products", "item:1"); +``` + +### Filtered Similarity Search + +```java +// Add elements with attributes +vectorSet.vadd("products", "laptop:1", 0.1, 0.2, 0.3); +vectorSet.vsetattr("products", "laptop:1", "{\"category\": \"electronics\", \"price\": 999.99}"); + +vectorSet.vadd("products", "phone:1", 0.4, 0.5, 0.6); +vectorSet.vsetattr("products", "phone:1", "{\"category\": \"electronics\", \"price\": 599.99}"); + +vectorSet.vadd("products", "book:1", 0.7, 0.8, 0.9); +vectorSet.vsetattr("products", "book:1", "{\"category\": \"books\", \"price\": 29.99}"); + +// Search with attribute filtering +VSimArgs filterArgs = VSimArgs.Builder + .filter(".category == \"electronics\" && .price > 500") + .count(10) + .build(); + +List filteredResults = vectorSet.vsim("products", filterArgs, 0.2, 0.3, 0.4); +System.out.println("Filtered results: " + filteredResults); +``` + +## Advanced Features + +### Quantization Options + +Vector Sets support different quantization methods to optimize memory usage: + +```java +// No quantization (highest precision, most memory) +VAddArgs noQuantArgs = VAddArgs.Builder + .quantizationType(QuantizationType.NO_QUANTIZATION) + .build(); +vectorSet.vadd("precise_vectors", "element:1", noQuantArgs, 1.262185, 1.958231); + +// 8-bit quantization (default, good balance) +VAddArgs q8Args = 
VAddArgs.Builder + .quantizationType(QuantizationType.Q8) + .build(); +vectorSet.vadd("balanced_vectors", "element:1", q8Args, 1.262185, 1.958231); + +// Binary quantization (lowest memory, fastest search) +VAddArgs binaryArgs = VAddArgs.Builder + .quantizationType(QuantizationType.BINARY) + .build(); +vectorSet.vadd("binary_vectors", "element:1", binaryArgs, 1.262185, 1.958231); + +// Compare the results +List precise = vectorSet.vemb("precise_vectors", "element:1"); +List balanced = vectorSet.vemb("balanced_vectors", "element:1"); +List binary = vectorSet.vemb("binary_vectors", "element:1"); + +System.out.println("Precise: " + precise); +System.out.println("Balanced: " + balanced); +System.out.println("Binary: " + binary); +``` + +### Dimensionality Reduction + +```java +// Create a high-dimensional vector (300 dimensions) +Double[] highDimVector = new Double[300]; +for (int i = 0; i < 300; i++) { + highDimVector[i] = (double) i / 299; +} + +// Add without reduction +vectorSet.vadd("full_vectors", "element:1", highDimVector); +Long fullDim = vectorSet.vdim("full_vectors"); +System.out.println("Full dimensions: " + fullDim); // 300 + +// Add with dimensionality reduction to 100 dimensions +vectorSet.vadd("reduced_vectors", 100, "element:1", highDimVector); +Long reducedDim = vectorSet.vdim("reduced_vectors"); +System.out.println("Reduced dimensions: " + reducedDim); // 100 +``` + +### Random Sampling + +```java +// Get random elements from the vector set +List randomElements = vectorSet.vrandmember("points", 3); +System.out.println("Random elements: " + randomElements); +``` + +## Vector Set Metadata and Inspection + +### Getting Vector Set Information + +```java +// Get detailed information about the vector set +VectorMetadata metadata = vectorSet.vinfo("points"); +System.out.println("Vector set metadata: " + metadata); + +// Get links/connections for HNSW graph structure +List links = vectorSet.vlinks("points", "pt:A"); +System.out.println("Graph links for pt:A: " + links); +``` + +## Real-World Use Cases + +### Semantic Search Application + +```java +public class SemanticSearchService { + private final RedisVectorSetCommands vectorSet; + + public SemanticSearchService(RedisVectorSetCommands vectorSet) { + this.vectorSet = vectorSet; + } + + // Add document with embedding and metadata + public void addDocument(String docId, double[] embedding, String title, String category) { + // Add vector to set + vectorSet.vadd("documents", docId, embedding); + + // Add metadata as attributes + String attributes = String.format( + "{\"title\": \"%s\", \"category\": \"%s\", \"timestamp\": %d}", + title, category, System.currentTimeMillis() + ); + vectorSet.vsetattr("documents", docId, attributes); + } + + // Search for similar documents with optional filtering + public List searchSimilar(double[] queryEmbedding, String categoryFilter, int limit) { + VSimArgs args = VSimArgs.Builder + .count(limit) + .filter(categoryFilter != null ? 
".category == \"" + categoryFilter + "\"" : null) + .build(); + + return vectorSet.vsim("documents", args, queryEmbedding); + } + + // Get document details + public DocumentInfo getDocument(String docId) { + List embedding = vectorSet.vemb("documents", docId); + String attributes = vectorSet.vgetattr("documents", docId); + return new DocumentInfo(docId, embedding, attributes); + } +} +``` + +### Recommendation System + +```java +public class RecommendationEngine { + private final RedisVectorSetCommands vectorSet; + + public RecommendationEngine(RedisVectorSetCommands vectorSet) { + this.vectorSet = vectorSet; + } + + // Add user profile with preferences vector + public void addUserProfile(String userId, double[] preferencesVector, Map profile) { + // Use quantization for memory efficiency + VAddArgs args = VAddArgs.Builder + .quantizationType(QuantizationType.Q8) + .build(); + + vectorSet.vadd("user_profiles", userId, args, preferencesVector); + + // Store user metadata + String attributes = convertToJson(profile); + vectorSet.vsetattr("user_profiles", userId, attributes); + } + + // Find similar users for collaborative filtering + public List findSimilarUsers(String userId, int count) { + VSimArgs args = VSimArgs.Builder + .count(count + 1) // +1 to exclude the user themselves + .build(); + + List similar = vectorSet.vsim("user_profiles", args, userId); + similar.remove(userId); // Remove the user from their own recommendations + return similar; + } + + // Get recommendations based on user similarity + public Map getRecommendations(String userId) { + VSimArgs args = VSimArgs.Builder + .count(10) + .build(); + + return vectorSet.vsimWithScore("user_profiles", args, userId); + } + + private String convertToJson(Map data) { + // Convert map to JSON string (implementation depends on your JSON library) + return "{}"; // Placeholder + } +} +``` + +### Image Similarity Search + +```java +public class ImageSearchService { + private final RedisVectorSetCommands vectorSet; + + public ImageSearchService(RedisVectorSetCommands vectorSet) { + this.vectorSet = vectorSet; + } + + // Add image with feature vector and metadata + public void indexImage(String imageId, float[] featureVector, String category, + int width, int height, String format) { + // Convert float array to Double array + Double[] vector = Arrays.stream(featureVector) + .mapToDouble(f -> (double) f) + .boxed() + .toArray(Double[]::new); + + // Use binary quantization for fast similarity search + VAddArgs args = VAddArgs.Builder + .quantizationType(QuantizationType.BINARY) + .build(); + + vectorSet.vadd("images", imageId, args, vector); + + // Store image metadata + String attributes = String.format( + "{\"category\": \"%s\", \"width\": %d, \"height\": %d, \"format\": \"%s\"}", + category, width, height, format + ); + vectorSet.vsetattr("images", imageId, attributes); + } + + // Find visually similar images + public List findSimilarImages(String imageId, String categoryFilter, int limit) { + VSimArgs.Builder argsBuilder = VSimArgs.Builder.count(limit); + + if (categoryFilter != null) { + argsBuilder.filter(".category == \"" + categoryFilter + "\""); + } + + Map results = vectorSet.vsimWithScore("images", argsBuilder.build(), imageId); + + return results.entrySet().stream() + .map(entry -> new SimilarImage(entry.getKey(), entry.getValue())) + .collect(Collectors.toList()); + } + + public static class SimilarImage { + public final String imageId; + public final double similarity; + + public SimilarImage(String imageId, double similarity) { 
+ this.imageId = imageId; + this.similarity = similarity; + } + } +} +``` + +## Performance Optimization + +### Memory Optimization + +```java +// Choose appropriate quantization based on your needs +public class VectorSetOptimizer { + + // For high-precision applications (e.g., scientific computing) + public void addHighPrecisionVector(RedisVectorSetCommands vectorSet, + String key, String element, double[] vector) { + VAddArgs args = VAddArgs.Builder + .quantizationType(QuantizationType.NO_QUANTIZATION) + .build(); + vectorSet.vadd(key, element, args, vector); + } + + // For balanced performance and memory usage (recommended default) + public void addBalancedVector(RedisVectorSetCommands vectorSet, + String key, String element, double[] vector) { + VAddArgs args = VAddArgs.Builder + .quantizationType(QuantizationType.Q8) + .build(); + vectorSet.vadd(key, element, args, vector); + } + + // For maximum speed and minimum memory (e.g., large-scale similarity search) + public void addFastVector(RedisVectorSetCommands vectorSet, + String key, String element, double[] vector) { + VAddArgs args = VAddArgs.Builder + .quantizationType(QuantizationType.BINARY) + .build(); + vectorSet.vadd(key, element, args, vector); + } +} +``` + +### Search Performance Tuning + +```java +// Optimize similarity search performance +public class SearchOptimizer { + + // For high-recall searches (more thorough but slower) + public List highRecallSearch(RedisVectorSetCommands vectorSet, + String key, double[] query, int count) { + VSimArgs args = VSimArgs.Builder + .count(count) + .explorationFactor(500) // Higher exploration for better recall + .build(); + return vectorSet.vsim(key, args, query); + } + + // For fast searches (lower recall but much faster) + public List fastSearch(RedisVectorSetCommands vectorSet, + String key, double[] query, int count) { + VSimArgs args = VSimArgs.Builder + .count(count) + .explorationFactor(50) // Lower exploration for speed + .build(); + return vectorSet.vsim(key, args, query); + } + + // Batch similarity searches for efficiency + public Map> batchSearch(RedisVectorSetCommands vectorSet, + String key, List queries, int count) { + Map> results = new HashMap<>(); + + VSimArgs args = VSimArgs.Builder + .count(count) + .build(); + + for (int i = 0; i < queries.size(); i++) { + String queryId = "query_" + i; + List similar = vectorSet.vsim(key, args, queries.get(i)); + results.put(queryId, similar); + } + + return results; + } +} +``` + +## Error Handling and Best Practices + +### Common Error Scenarios + +```java +public class VectorSetErrorHandler { + + public void handleCommonErrors(RedisVectorSetCommands vectorSet) { + try { + // Attempt to add vector to non-existent key + vectorSet.vadd("my_vectors", "element:1", 1.0, 2.0, 3.0); + + } catch (RedisCommandExecutionException e) { + if (e.getMessage().contains("WRONGTYPE")) { + System.err.println("Key exists but is not a vector set"); + // Handle type mismatch + } else if (e.getMessage().contains("dimension mismatch")) { + System.err.println("Vector dimension doesn't match existing vectors"); + // Handle dimension mismatch + } + } + + try { + // Attempt to get vector from non-existent element + List vector = vectorSet.vemb("my_vectors", "non_existent"); + if (vector == null || vector.isEmpty()) { + System.out.println("Element not found in vector set"); + } + + } catch (RedisCommandExecutionException e) { + System.err.println("Error retrieving vector: " + e.getMessage()); + } + } + + // Validate vector dimensions before adding + public 
boolean addVectorSafely(RedisVectorSetCommands vectorSet, + String key, String element, double[] vector) { + try { + // Check if key exists and get its dimensions + Long existingDim = vectorSet.vdim(key); + if (existingDim != null && existingDim != vector.length) { + System.err.println("Dimension mismatch: expected " + existingDim + + ", got " + vector.length); + return false; + } + + vectorSet.vadd(key, element, vector); + return true; + + } catch (Exception e) { + System.err.println("Failed to add vector: " + e.getMessage()); + return false; + } + } +} +``` + +### Best Practices + +```java +public class VectorSetBestPractices { + + // 1. Use appropriate quantization for your use case + public void chooseQuantization() { + // High precision needed (scientific, financial) + VAddArgs highPrecision = VAddArgs.Builder + .quantizationType(QuantizationType.NO_QUANTIZATION) + .build(); + + // Balanced performance (most applications) + VAddArgs balanced = VAddArgs.Builder + .quantizationType(QuantizationType.Q8) + .build(); + + // Maximum speed/minimum memory (large scale) + VAddArgs fast = VAddArgs.Builder + .quantizationType(QuantizationType.BINARY) + .build(); + } + + // 2. Batch operations for better performance + public void batchOperations(RedisVectorSetCommands vectorSet) { + // Instead of individual adds, batch them + List vectors = loadVectorData(); + + for (VectorData data : vectors) { + vectorSet.vadd("batch_vectors", data.id, data.vector); + if (!data.attributes.isEmpty()) { + vectorSet.vsetattr("batch_vectors", data.id, data.attributes); + } + } + } + + // 3. Use meaningful element names + public void useDescriptiveNames(RedisVectorSetCommands vectorSet) { + // Good: descriptive, hierarchical naming + vectorSet.vadd("products", "electronics:laptop:dell:xps13", 0.1, 0.2, 0.3); + vectorSet.vadd("users", "user:12345:preferences", 0.4, 0.5, 0.6); + + // Avoid: generic, non-descriptive names + // vectorSet.vadd("data", "item1", 0.1, 0.2, 0.3); + } + + // 4. 
Monitor vector set size and performance + public void monitorVectorSet(RedisVectorSetCommands vectorSet, String key) { + Long cardinality = vectorSet.vcard(key); + Long dimensions = vectorSet.vdim(key); + + System.out.println("Vector set '" + key + "' stats:"); + System.out.println(" Elements: " + cardinality); + System.out.println(" Dimensions: " + dimensions); + System.out.println(" Estimated memory: " + estimateMemoryUsage(cardinality, dimensions)); + } + + private String estimateMemoryUsage(Long elements, Long dimensions) { + // Rough estimation for Q8 quantization + long bytesPerVector = dimensions * 1; // 1 byte per dimension for Q8 + long totalBytes = elements * bytesPerVector; + return String.format("~%.2f MB", totalBytes / (1024.0 * 1024.0)); + } + + private List loadVectorData() { + // Placeholder for loading vector data + return new ArrayList<>(); + } + + private static class VectorData { + String id; + double[] vector; + String attributes; + } +} +``` + +## Integration Examples + +### Spring Boot Integration + +```java +@Configuration +public class VectorSetConfig { + + @Bean + public RedisClient redisClient() { + return RedisClient.create("redis://localhost:6379"); + } + + @Bean + public RedisVectorSetCommands vectorSetCommands(RedisClient client) { + return client.connect().sync(); + } +} + +@Service +public class VectorSearchService { + + @Autowired + private RedisVectorSetCommands vectorSet; + + public void addDocument(String docId, double[] embedding, Map metadata) { + vectorSet.vadd("documents", docId, embedding); + + if (!metadata.isEmpty()) { + String attributes = convertToJson(metadata); + vectorSet.vsetattr("documents", docId, attributes); + } + } + + public List searchSimilar(double[] query, int limit) { + VSimArgs args = VSimArgs.Builder.count(limit).build(); + return vectorSet.vsim("documents", args, query); + } + + private String convertToJson(Map metadata) { + // Use your preferred JSON library (Jackson, Gson, etc.) 
+ return "{}"; // Placeholder + } +} +``` + +### Reactive Programming + +```java +public class ReactiveVectorService { + + private final RedisVectorSetReactiveCommands reactiveVectorSet; + + public ReactiveVectorService(RedisClient client) { + this.reactiveVectorSet = client.connect().reactive(); + } + + public Mono addVectorAsync(String key, String element, double[] vector) { + return reactiveVectorSet.vadd(key, element, vector); + } + + public Flux searchSimilarAsync(String key, double[] query, int count) { + VSimArgs args = VSimArgs.Builder.count(count).build(); + return reactiveVectorSet.vsim(key, args, query); + } + + public Mono> searchWithScoresAsync(String key, String element, int count) { + VSimArgs args = VSimArgs.Builder.count(count).build(); + return reactiveVectorSet.vsimWithScore(key, args, element); + } +} +``` + +## Migration and Compatibility + +### Migrating from Other Vector Databases + +```java +public class VectorMigrationService { + + // Migrate from external vector database to Redis Vector Sets + public void migrateVectors(List externalVectors, + RedisVectorSetCommands vectorSet) { + String targetKey = "migrated_vectors"; + + for (VectorRecord record : externalVectors) { + // Add vector with appropriate quantization + VAddArgs args = VAddArgs.Builder + .quantizationType(QuantizationType.Q8) // Balance of speed and precision + .build(); + + vectorSet.vadd(targetKey, record.getId(), args, record.getVector()); + + // Migrate metadata as attributes + if (record.getMetadata() != null) { + String attributes = convertMetadataToJson(record.getMetadata()); + vectorSet.vsetattr(targetKey, record.getId(), attributes); + } + } + + System.out.println("Migrated " + externalVectors.size() + " vectors to Redis Vector Sets"); + } + + // Validate migration by comparing similarity results + public void validateMigration(String originalQuery, List expectedResults, + RedisVectorSetCommands vectorSet) { + // Perform similarity search on migrated data + VSimArgs args = VSimArgs.Builder + .count(expectedResults.size()) + .build(); + + // Assuming originalQuery is converted to vector format + double[] queryVector = convertQueryToVector(originalQuery); + List actualResults = vectorSet.vsim("migrated_vectors", args, queryVector); + + // Compare results (implementation depends on your validation criteria) + boolean isValid = validateResults(expectedResults, actualResults); + System.out.println("Migration validation: " + (isValid ? 
"PASSED" : "FAILED")); + } + + private String convertMetadataToJson(Map metadata) { + // Convert metadata to JSON string + return "{}"; // Placeholder + } + + private double[] convertQueryToVector(String query) { + // Convert query to vector using your embedding model + return new double[]{0.0}; // Placeholder + } + + private boolean validateResults(List expected, List actual) { + // Implement your validation logic + return true; // Placeholder + } + + private static class VectorRecord { + private String id; + private double[] vector; + private Map metadata; + + // Getters and setters + public String getId() { return id; } + public double[] getVector() { return vector; } + public Map getMetadata() { return metadata; } + } +} +``` \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index eeb14014a6..17f0ab9aa1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -57,6 +57,8 @@ nav: - Publish/Subscribe: user-guide/pubsub.md - Transactions/Multi: user-guide/transactions-multi.md - Redis JSON: user-guide/redis-json.md + - RediSearch: user-guide/redis-search.md + - Redis Vector Sets: user-guide/vector-sets.md - Redis programmability: - LUA Scripting: user-guide/lua-scripting.md - Redis Functions: user-guide/redis-functions.md diff --git a/src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionAsyncCommands.java b/src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionAsyncCommands.java index 6a1e645184..a3cb8e3f4b 100644 --- a/src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionAsyncCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionAsyncCommands.java @@ -9,10 +9,11 @@ * @author Mark Paluch * @author Tihomir Mateev */ -public interface NodeSelectionAsyncCommands extends BaseNodeSelectionAsyncCommands, - NodeSelectionFunctionAsyncCommands, NodeSelectionGeoAsyncCommands, NodeSelectionHashAsyncCommands, - NodeSelectionHLLAsyncCommands, NodeSelectionKeyAsyncCommands, NodeSelectionListAsyncCommands, - NodeSelectionScriptingAsyncCommands, NodeSelectionServerAsyncCommands, NodeSelectionSetAsyncCommands, - NodeSelectionSortedSetAsyncCommands, NodeSelectionStreamCommands, NodeSelectionStringAsyncCommands, - NodeSelectionJsonAsyncCommands, NodeSelectionVectorSetAsyncCommands { +public interface NodeSelectionAsyncCommands + extends BaseNodeSelectionAsyncCommands, NodeSelectionFunctionAsyncCommands, + NodeSelectionGeoAsyncCommands, NodeSelectionHashAsyncCommands, NodeSelectionHLLAsyncCommands, + NodeSelectionKeyAsyncCommands, NodeSelectionListAsyncCommands, NodeSelectionScriptingAsyncCommands, + NodeSelectionServerAsyncCommands, NodeSelectionSetAsyncCommands, NodeSelectionSortedSetAsyncCommands, + NodeSelectionStreamCommands, NodeSelectionStringAsyncCommands, NodeSelectionJsonAsyncCommands, + NodeSelectionVectorSetAsyncCommands, NodeSelectionSearchAsyncCommands { } diff --git a/src/main/java/io/lettuce/core/cluster/api/async/RediSearchAsyncCommands.java b/src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionSearchAsyncCommands.java similarity index 99% rename from src/main/java/io/lettuce/core/cluster/api/async/RediSearchAsyncCommands.java rename to src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionSearchAsyncCommands.java index 6062461d69..236ee6c872 100644 --- a/src/main/java/io/lettuce/core/cluster/api/async/RediSearchAsyncCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionSearchAsyncCommands.java @@ -34,7 +34,7 @@ * @since 6.8 * @generated by io.lettuce.apigenerator.CreateAsyncNodeSelectionClusterApi 
*/ -public interface RediSearchAsyncCommands { +public interface NodeSelectionSearchAsyncCommands { /** * Create a new search index with the given name and field definitions using default settings. diff --git a/src/main/java/io/lettuce/core/cluster/api/async/RedisClusterAsyncCommands.java b/src/main/java/io/lettuce/core/cluster/api/async/RedisClusterAsyncCommands.java index 0d05c83dae..3e46f10ade 100644 --- a/src/main/java/io/lettuce/core/cluster/api/async/RedisClusterAsyncCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/async/RedisClusterAsyncCommands.java @@ -37,12 +37,12 @@ * @author dengliming * @since 4.0 */ -public interface RedisClusterAsyncCommands - extends BaseRedisAsyncCommands, RedisAclAsyncCommands, RedisFunctionAsyncCommands, - RedisGeoAsyncCommands, RedisHashAsyncCommands, RedisHLLAsyncCommands, RedisKeyAsyncCommands, - RedisListAsyncCommands, RedisScriptingAsyncCommands, RedisServerAsyncCommands, - RedisSetAsyncCommands, RedisSortedSetAsyncCommands, RedisStreamAsyncCommands, - RedisStringAsyncCommands, RedisJsonAsyncCommands { +public interface RedisClusterAsyncCommands extends BaseRedisAsyncCommands, RedisAclAsyncCommands, + RedisFunctionAsyncCommands, RedisGeoAsyncCommands, RedisHashAsyncCommands, + RedisHLLAsyncCommands, RedisKeyAsyncCommands, RedisListAsyncCommands, + RedisScriptingAsyncCommands, RedisServerAsyncCommands, RedisSetAsyncCommands, + RedisSortedSetAsyncCommands, RedisStreamAsyncCommands, RedisStringAsyncCommands, + RedisJsonAsyncCommands, RedisVectorSetAsyncCommands, RediSearchAsyncCommands { /** * Set the default timeout for operations. A zero timeout value indicates to not time out. diff --git a/src/main/java/io/lettuce/core/cluster/api/reactive/RedisClusterReactiveCommands.java b/src/main/java/io/lettuce/core/cluster/api/reactive/RedisClusterReactiveCommands.java index fa34e9a27c..028fa639f8 100644 --- a/src/main/java/io/lettuce/core/cluster/api/reactive/RedisClusterReactiveCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/reactive/RedisClusterReactiveCommands.java @@ -37,12 +37,12 @@ * @author dengliming * @since 5.0 */ -public interface RedisClusterReactiveCommands - extends BaseRedisReactiveCommands, RedisAclReactiveCommands, RedisFunctionReactiveCommands, - RedisGeoReactiveCommands, RedisHashReactiveCommands, RedisHLLReactiveCommands, - RedisKeyReactiveCommands, RedisListReactiveCommands, RedisScriptingReactiveCommands, - RedisServerReactiveCommands, RedisSetReactiveCommands, RedisSortedSetReactiveCommands, - RedisStreamReactiveCommands, RedisStringReactiveCommands, RedisJsonReactiveCommands { +public interface RedisClusterReactiveCommands extends BaseRedisReactiveCommands, RedisAclReactiveCommands, + RedisFunctionReactiveCommands, RedisGeoReactiveCommands, RedisHashReactiveCommands, + RedisHLLReactiveCommands, RedisKeyReactiveCommands, RedisListReactiveCommands, + RedisScriptingReactiveCommands, RedisServerReactiveCommands, RedisSetReactiveCommands, + RedisSortedSetReactiveCommands, RedisStreamReactiveCommands, RedisStringReactiveCommands, + RedisJsonReactiveCommands, RedisVectorSetReactiveCommands, RediSearchReactiveCommands { /** * Set the default timeout for operations. A zero timeout value indicates to not time out. 
diff --git a/src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionCommands.java b/src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionCommands.java index b5abc3b113..732ed73013 100644 --- a/src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionCommands.java @@ -8,10 +8,10 @@ * @author Mark Paluch * @author Tihomir Mateev */ -public interface NodeSelectionCommands - extends BaseNodeSelectionCommands, NodeSelectionFunctionCommands, NodeSelectionGeoCommands, - NodeSelectionHashCommands, NodeSelectionHLLCommands, NodeSelectionKeyCommands, - NodeSelectionListCommands, NodeSelectionScriptingCommands, NodeSelectionServerCommands, - NodeSelectionSetCommands, NodeSelectionSortedSetCommands, NodeSelectionStreamCommands, - NodeSelectionStringCommands, NodeSelectionJsonCommands, NodeSelectionVectorSetCommands { +public interface NodeSelectionCommands extends BaseNodeSelectionCommands, NodeSelectionFunctionCommands, + NodeSelectionGeoCommands, NodeSelectionHashCommands, NodeSelectionHLLCommands, + NodeSelectionKeyCommands, NodeSelectionListCommands, NodeSelectionScriptingCommands, + NodeSelectionServerCommands, NodeSelectionSetCommands, NodeSelectionSortedSetCommands, + NodeSelectionStreamCommands, NodeSelectionStringCommands, NodeSelectionJsonCommands, + NodeSelectionVectorSetCommands, NodeSelectionSearchCommands { } diff --git a/src/main/java/io/lettuce/core/cluster/api/sync/RediSearchCommands.java b/src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionSearchCommands.java similarity index 99% rename from src/main/java/io/lettuce/core/cluster/api/sync/RediSearchCommands.java rename to src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionSearchCommands.java index edd167a3be..0c141805f4 100644 --- a/src/main/java/io/lettuce/core/cluster/api/sync/RediSearchCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionSearchCommands.java @@ -34,7 +34,7 @@ * @since 6.8 * @generated by io.lettuce.apigenerator.CreateSyncNodeSelectionClusterApi */ -public interface RediSearchCommands { +public interface NodeSelectionSearchCommands { /** * Create a new search index with the given name and field definitions using default settings. diff --git a/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java b/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java index c621df002d..de0bfb2c28 100644 --- a/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java +++ b/src/main/java/io/lettuce/core/cluster/api/sync/RedisClusterCommands.java @@ -37,11 +37,11 @@ * @author Tihomir Mateev * @since 4.0 */ -public interface RedisClusterCommands - extends BaseRedisCommands, RedisAclCommands, RedisFunctionCommands, RedisGeoCommands, - RedisHashCommands, RedisHLLCommands, RedisKeyCommands, RedisListCommands, - RedisScriptingCommands, RedisServerCommands, RedisSetCommands, RedisSortedSetCommands, - RedisStreamCommands, RedisStringCommands, RedisJsonCommands, RedisVectorSetCommands { +public interface RedisClusterCommands extends BaseRedisCommands, RedisAclCommands, + RedisFunctionCommands, RedisGeoCommands, RedisHashCommands, RedisHLLCommands, + RedisKeyCommands, RedisListCommands, RedisScriptingCommands, RedisServerCommands, + RedisSetCommands, RedisSortedSetCommands, RedisStreamCommands, RedisStringCommands, + RedisJsonCommands, RedisVectorSetCommands, RediSearchCommands { /** * Set the default timeout for operations. 
A zero timeout value indicates to not time out. diff --git a/src/main/java/io/lettuce/core/output/ArrayComplexData.java b/src/main/java/io/lettuce/core/output/ArrayComplexData.java index 5e447a9c5a..4a38a4d79a 100644 --- a/src/main/java/io/lettuce/core/output/ArrayComplexData.java +++ b/src/main/java/io/lettuce/core/output/ArrayComplexData.java @@ -82,4 +82,9 @@ public Map getDynamicMap() { return Collections.unmodifiableMap(map); } + @Override + public boolean isList() { + return true; + } + } diff --git a/src/main/java/io/lettuce/core/output/ComplexData.java b/src/main/java/io/lettuce/core/output/ComplexData.java index 08ef81a20f..0a761adc7e 100644 --- a/src/main/java/io/lettuce/core/output/ComplexData.java +++ b/src/main/java/io/lettuce/core/output/ComplexData.java @@ -115,4 +115,40 @@ public Map getDynamicMap() { throw new UnsupportedOperationException("The type of data stored in this dynamic object is not a map"); } + /** + * Returns true if the underlying data structure is a {@link Map} + *

+ * Does not mean that calling {@link #getDynamicMap()} would not throw an exception. Implementations might decide to return + * a representation of the data as a map, even if the underlying data structure is not a map. + * + * @return true if the underlying data structure is a {@link Map} + */ + public boolean isMap() { + return false; + } + + /** + * Returns true if the underlying data structure is a {@link Set} + *

+ * Does not mean that calling {@link #getDynamicSet()} would not throw an exception. Implementations might decide to return + * a representation of the data as a set, even if the underlying data structure is not a set. + * + * @return true if the underlying data structure is a {@link Set} + */ + public boolean isSet() { + return false; + } + + /** + * Returns true if the underlying data structure is a {@link List} + *

+ * Does not mean that calling {@link #getDynamicList()} would not throw an exception. Implementations might decide to return + * a representation of the data as a list, even if the underlying data structure is not a list. + * + * @return true if the underlying data structure is a {@link List} + */ + public boolean isList() { + return false; + } + } diff --git a/src/main/java/io/lettuce/core/output/ComplexDataParser.java b/src/main/java/io/lettuce/core/output/ComplexDataParser.java index 332fb61a4b..52821a6c8d 100644 --- a/src/main/java/io/lettuce/core/output/ComplexDataParser.java +++ b/src/main/java/io/lettuce/core/output/ComplexDataParser.java @@ -10,7 +10,7 @@ /** * Any usage of the {@link ComplexOutput} comes hand in hand with a respective {@link ComplexDataParser} that is able to parse * the data extracted from the server to a meaningful Java object. - * + * * @param the type of the parsed object * @author Tihomir Mateev * @see ComplexData diff --git a/src/main/java/io/lettuce/core/output/EncodedComplexOutput.java b/src/main/java/io/lettuce/core/output/EncodedComplexOutput.java index 38d421901f..f8f60a45f2 100644 --- a/src/main/java/io/lettuce/core/output/EncodedComplexOutput.java +++ b/src/main/java/io/lettuce/core/output/EncodedComplexOutput.java @@ -25,14 +25,16 @@ public EncodedComplexOutput(RedisCodec codec, ComplexDataParser parser) @Override public void set(ByteBuffer bytes) { - data.storeObject(bytes.asReadOnlyBuffer()); - + if (bytes != null) { + data.storeObject(bytes.asReadOnlyBuffer()); + } } @Override public void setSingle(ByteBuffer bytes) { - data.storeObject(bytes.asReadOnlyBuffer()); - + if (bytes != null) { + data.storeObject(bytes.asReadOnlyBuffer()); + } } } diff --git a/src/main/java/io/lettuce/core/output/MapComplexData.java b/src/main/java/io/lettuce/core/output/MapComplexData.java index f2f2b29a70..a841dc375c 100644 --- a/src/main/java/io/lettuce/core/output/MapComplexData.java +++ b/src/main/java/io/lettuce/core/output/MapComplexData.java @@ -45,4 +45,9 @@ public Map getDynamicMap() { return Collections.unmodifiableMap(data); } + @Override + public boolean isMap() { + return true; + } + } diff --git a/src/main/java/io/lettuce/core/output/SetComplexData.java b/src/main/java/io/lettuce/core/output/SetComplexData.java index 0d95afdd45..91a8567dab 100644 --- a/src/main/java/io/lettuce/core/output/SetComplexData.java +++ b/src/main/java/io/lettuce/core/output/SetComplexData.java @@ -47,4 +47,9 @@ public List getDynamicList() { return Collections.unmodifiableList(list); } + @Override + public boolean isSet() { + return true; + } + } diff --git a/src/main/java/io/lettuce/core/search/SearchReplyParser.java b/src/main/java/io/lettuce/core/search/SearchReplyParser.java index 5d35ff1643..976b60714c 100644 --- a/src/main/java/io/lettuce/core/search/SearchReplyParser.java +++ b/src/main/java/io/lettuce/core/search/SearchReplyParser.java @@ -108,12 +108,11 @@ public SearchReplyParser(RedisCodec codec) { @Override public SearchReply parse(ComplexData data) { try { - try { + if (data.isList()) { return new Resp2SearchResultsParser().parse(data); - } catch (UnsupportedOperationException e) { - // automagically switch to RESP3 parsing if you encounter a ComplexData type different then an array - return new Resp3SearchResultsParser().parse(data); } + + return new Resp3SearchResultsParser().parse(data); } catch (Exception e) { LOG.warn("Unable to parse the result from Redis", e); return new SearchReply<>(); diff --git 
a/src/main/java/io/lettuce/core/search/SpellCheckResultParser.java b/src/main/java/io/lettuce/core/search/SpellCheckResultParser.java index bb0eb6ff26..69951bffea 100644 --- a/src/main/java/io/lettuce/core/search/SpellCheckResultParser.java +++ b/src/main/java/io/lettuce/core/search/SpellCheckResultParser.java @@ -51,11 +51,11 @@ public SpellCheckResult parse(ComplexData data) { return new SpellCheckResult<>(); } - try { + if (data.isList()) { return new SpellCheckResp2Parser().parse(data); - } catch (UnsupportedOperationException e) { - return new SpellCheckResp3Parser().parse(data); } + + return new SpellCheckResp3Parser().parse(data); } /** @@ -191,8 +191,8 @@ private List> parseSuggestions(List sugge List suggestionData = ((ComplexData) suggestionObj).getDynamicList(); if (suggestionData.size() != 2) { - throw new IllegalArgumentException( - "Failed while parsing FT.SPELLCHECK: each suggestion must have 2 parts [score, suggestion]"); + LOG.warn("Failed while parsing FT.SPELLCHECK: each suggestion must have 2 parts"); + continue; } // First element is the score diff --git a/src/main/java/io/lettuce/core/search/SynonymMapParser.java b/src/main/java/io/lettuce/core/search/SynonymMapParser.java index cb7b75b4ab..d68b11ef23 100644 --- a/src/main/java/io/lettuce/core/search/SynonymMapParser.java +++ b/src/main/java/io/lettuce/core/search/SynonymMapParser.java @@ -7,7 +7,6 @@ package io.lettuce.core.search; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -55,13 +54,11 @@ public Map> parse(ComplexData data) { return new LinkedHashMap<>(); } - try { - // Try RESP2 parsing first (array-based) + if (data.isList()) { return parseResp2(data); - } catch (UnsupportedOperationException e) { - // Automatically switch to RESP3 parsing if we encounter a ComplexData type different than an array - return parseResp3(data); } + + return parseResp3(data); } /** diff --git a/src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java b/src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java index 06d20a36a4..70aa17bbe9 100644 --- a/src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java +++ b/src/main/java/io/lettuce/core/search/arguments/AggregateArgs.java @@ -86,15 +86,11 @@ public class AggregateArgs { private Optional timeout = Optional.empty(); - private final List> groupByList = new ArrayList<>(); - - private final List> sortByList = new ArrayList<>(); - - private final List> applyList = new ArrayList<>(); - - private Optional limit = Optional.empty(); - - private final List filters = new ArrayList<>(); + /** + * Ordered list of pipeline operations (GROUPBY, SORTBY, APPLY, FILTER). These operations must be applied in the order + * specified by the user. + */ + private final List> pipelineOperations = new ArrayList<>(); private Optional withCursor = Optional.empty(); @@ -214,7 +210,7 @@ public Builder timeout(Duration timeout) { * @return the builder. */ public Builder groupBy(GroupBy groupBy) { - args.groupByList.add(groupBy); + args.pipelineOperations.add(groupBy); return this; } @@ -225,7 +221,7 @@ public Builder groupBy(GroupBy groupBy) { * @return the builder. */ public Builder sortBy(SortBy sortBy) { - args.sortByList.add(sortBy); + args.pipelineOperations.add(sortBy); return this; } @@ -236,7 +232,7 @@ public Builder sortBy(SortBy sortBy) { * @return the builder. 
*/ public Builder apply(Apply apply) { - args.applyList.add(apply); + args.pipelineOperations.add(apply); return this; } @@ -268,7 +264,7 @@ public Builder apply(Apply apply) { * @return the builder. */ public Builder limit(long offset, long num) { - args.limit = Optional.of(new Limit(offset, num)); + args.pipelineOperations.add(new Limit<>(offset, num)); return this; } @@ -302,7 +298,7 @@ public Builder limit(long offset, long num) { * @return the builder. */ public Builder filter(V filter) { - args.filters.add(filter); + args.pipelineOperations.add(new Filter<>(filter)); return this; } @@ -513,32 +509,12 @@ public void build(CommandArgs args) { args.add(t.toMillis()); }); - // Add GROUPBY clauses - for (GroupBy groupBy : groupByList) { - groupBy.build(args); - } - - // Add SORTBY clauses - for (SortBy sortBy : sortByList) { - sortBy.build(args); - } - - // Add APPLY clauses - for (Apply apply : applyList) { - apply.build(args); - } - - // Add LIMIT clause - limit.ifPresent(l -> { - args.add(CommandKeyword.LIMIT); - args.add(l.offset); - args.add(l.num); - }); - - // Add FILTER clauses - for (V filter : filters) { - args.add(CommandKeyword.FILTER); - args.addValue(filter); + // Add pipeline operations in user-specified order + for (PipelineOperation operation : pipelineOperations) { + // Cast is safe because all operations can build with CommandArgs + @SuppressWarnings("unchecked") + PipelineOperation typedOperation = (PipelineOperation) operation; + typedOperation.build(args); } // Add WITHCURSOR clause @@ -578,6 +554,21 @@ public Optional getWithCursor() { return withCursor; } + /** + * Interface for pipeline operations that need to be applied in user-specified order. This includes GROUPBY, SORTBY, APPLY, + * and FILTER operations. + */ + public interface PipelineOperation { + + /** + * Build the operation arguments into the command args. + * + * @param args the command args to build into + */ + void build(CommandArgs args); + + } + // Helper classes public static class LoadField { @@ -592,7 +583,7 @@ public static class LoadField { } - public static class Limit { + public static class Limit implements PipelineOperation { final long offset; @@ -603,6 +594,13 @@ public static class Limit { this.num = num; } + @Override + public void build(CommandArgs args) { + args.add(CommandKeyword.LIMIT); + args.add(offset); + args.add(num); + } + } public static class WithCursor { @@ -676,7 +674,7 @@ public static WithCursor of(Long count) { * performance. *

*/ - public static class GroupBy { + public static class GroupBy implements PipelineOperation { private final List properties; @@ -705,6 +703,7 @@ public static GroupBy of(K... properties) { return new GroupBy<>(Arrays.asList(properties)); } + @Override public void build(CommandArgs args) { args.add(CommandKeyword.GROUPBY); args.add(properties.size()); @@ -764,7 +763,7 @@ public void build(CommandArgs args) { * using LIMIT. *

*/ - public static class SortBy { + public static class SortBy implements PipelineOperation { private final List> properties; @@ -810,7 +809,8 @@ public static SortBy of(SortProperty... properties) { return new SortBy<>(Arrays.asList(properties)); } - public void build(CommandArgs args) { + @Override + public void build(CommandArgs args) { args.add(CommandKeyword.SORTBY); // Count includes property + direction pairs args.add(properties.size() * 2L); @@ -880,7 +880,7 @@ public void build(CommandArgs args) { * can be referenced by further operations. *

*/ - public static class Apply { + public static class Apply implements PipelineOperation { private final V expression; @@ -891,6 +891,7 @@ public Apply(V expression, K name) { this.name = name; } + @Override public void build(CommandArgs args) { args.add(CommandKeyword.APPLY); args.addValue(expression); @@ -1065,6 +1066,31 @@ public void build(CommandArgs args) { } + /** + * Represents a FILTER clause in an aggregation pipeline. + * + *

+ * Filters the results using predicate expressions relating to values in each result. Filters are applied after the query + * and relate to the current state of the pipeline. This allows filtering on computed fields created by APPLY operations or + * reducer results. + *

+ */ + public static class Filter implements PipelineOperation { + + private final V expression; + + public Filter(V expression) { + this.expression = expression; + } + + @Override + public void build(CommandArgs args) { + args.add(CommandKeyword.FILTER); + args.addValue(expression); + } + + } + /** * Represents a sort property with direction. */ diff --git a/src/main/java/io/lettuce/core/search/arguments/CreateArgs.java b/src/main/java/io/lettuce/core/search/arguments/CreateArgs.java index 94240c1f04..6e06cae4d2 100644 --- a/src/main/java/io/lettuce/core/search/arguments/CreateArgs.java +++ b/src/main/java/io/lettuce/core/search/arguments/CreateArgs.java @@ -52,19 +52,19 @@ public enum TargetType { private Optional payloadField = Optional.empty(); - private boolean maxTextFields; + private boolean maxTextFields = false; private OptionalLong temporary = OptionalLong.empty(); - private boolean noOffsets; + private boolean noOffsets = false; - private boolean noHighlight; + private boolean noHighlight = false; - private boolean noFields; + private boolean noFields = false; - private boolean noFrequency; + private boolean noFrequency = false; - private boolean skipInitialScan; + private boolean skipInitialScan = false; private Optional> stopWords = Optional.empty(); @@ -108,9 +108,8 @@ public Builder on(TargetType targetType) { * * @param prefix the prefix * @return the instance of the current {@link Builder} for the purpose of method chaining - * @see {@link Builder#addPrefixes(List)} */ - public Builder addPrefix(K prefix) { + public Builder withPrefix(K prefix) { instance.prefixes.add(prefix); return this; } @@ -121,7 +120,7 @@ public Builder addPrefix(K prefix) { * @param prefixes a {@link List} of prefixes * @return the instance of the current {@link Builder} for the purpose of method chaining */ - public Builder addPrefixes(List prefixes) { + public Builder withPrefixes(List prefixes) { instance.prefixes.addAll(prefixes); return this; } @@ -212,12 +211,11 @@ public Builder payloadField(K field) { * Forces RediSearch to encode indexes as if there were more than 32 text attributes, which allows you to add additional * attributes (beyond 32) using FT.ALTER. For efficiency, RediSearch encodes indexes differently if they are created * with less than 32 text attributes. - * - * @param maxTextFields the maximum number of text fields + * * @return the instance of the current {@link Builder} for the purpose of method chaining */ - public Builder maxTextFields(boolean maxTextFields) { - instance.maxTextFields = maxTextFields; + public Builder maxTextFields() { + instance.maxTextFields = true; return this; } @@ -227,7 +225,7 @@ public Builder maxTextFields(boolean maxTextFields) { * Creates a lightweight temporary index that expires after a specified period of inactivity, in seconds. The internal * idle timer is reset whenever the index is searched or added to. Because such indexes are lightweight, you can create * thousands of such indexes without negative performance implications and, therefore, you should consider using - * {@link Builder#skipInitialScan(boolean)} to avoid costly scanning. + * {@link Builder#skipInitialScan()} to avoid costly scanning. *

* Warning: When temporary indexes expire, they drop all the records associated with them. FT.DROPINDEX was introduced * with a default of not deleting docs and a DD flag that enforced deletion. However, for temporary indexes, documents @@ -246,14 +244,13 @@ public Builder temporary(long seconds) { /** * Set the no offsets flag. The default setting is to have offsets. *

- * It saves memory, but does not allow exact searches or highlighting. It implies - * {@link Builder#noHighlighting(boolean)} is set to true. - * - * @param noOffsets the no offsets flag + * It saves memory, but does not allow exact searches or highlighting. It implies {@link Builder#noHighlighting()} is + * set to true. + * * @return the instance of the current {@link Builder} for the purpose of method chaining */ - public Builder noOffsets(boolean noOffsets) { - instance.noOffsets = noOffsets; + public Builder noOffsets() { + instance.noOffsets = true; return this; } @@ -262,12 +259,11 @@ public Builder noOffsets(boolean noOffsets) { *

* Conserves storage space and memory by disabling highlighting support. If set, the corresponding byte offsets for term * positions are not stored. NOHL is also implied by NOOFFSETS. - * - * @param noHL the no highlighting flag + * * @return the instance of the current {@link Builder} for the purpose of method chaining */ - public Builder noHighlighting(boolean noHL) { - instance.noHighlight = noHL; + public Builder noHighlighting() { + instance.noHighlight = true; return this; } @@ -276,11 +272,10 @@ public Builder noHighlighting(boolean noHL) { *

* Does not store attribute bits for each term. It saves memory, but it does not allow filtering by specific attributes. * - * @param noFields the no fields flag * @return the instance of the current {@link Builder} for the purpose of method chaining */ - public Builder noFields(boolean noFields) { - instance.noFields = noFields; + public Builder noFields() { + instance.noFields = true; return this; } @@ -290,22 +285,20 @@ public Builder noFields(boolean noFields) { * Does not store the frequency of each term. It saves memory, but it does not allow sorting by frequency of a given * term. * - * @param noFreqs the no frequency flag * @return the instance of the current {@link Builder} for the purpose of method chaining */ - public Builder noFrequency(boolean noFreqs) { - instance.noFrequency = noFreqs; + public Builder noFrequency() { + instance.noFrequency = true; return this; } /** * Set the skip initial scan flag. The default setting is to scan initially. * - * @param skipInitialScan the skip initial scan flag * @return the instance of the current {@link Builder} for the purpose of method chaining */ - public Builder skipInitialScan(boolean skipInitialScan) { - instance.skipInitialScan = skipInitialScan; + public Builder skipInitialScan() { + instance.skipInitialScan = true; return this; } @@ -345,8 +338,8 @@ public Optional getOn() { * Get the prefixes for the index. * * @return the prefixes - * @see Builder#addPrefix(Object) - * @see Builder#addPrefixes(List) + * @see Builder#withPrefix(Object) + * @see Builder#withPrefixes(List) */ public List getPrefixes() { return prefixes; @@ -416,7 +409,7 @@ public Optional getPayloadField() { * Get the maximum number of text fields in the index. * * @return the maximum number of text fields - * @see Builder#maxTextFields(boolean) + * @see Builder#maxTextFields() */ public boolean isMaxTextFields() { return maxTextFields; @@ -436,7 +429,7 @@ public OptionalLong getTemporary() { * Get the no offsets flag. * * @return the no offsets flag - * @see Builder#noOffsets(boolean) + * @see Builder#noOffsets() */ public boolean isNoOffsets() { return noOffsets; @@ -446,7 +439,7 @@ public boolean isNoOffsets() { * Get the no highlighting flag. * * @return the no highlighting flag - * @see Builder#noHighlighting(boolean) + * @see Builder#noHighlighting() */ public boolean isNoHighlight() { return noHighlight; @@ -456,7 +449,7 @@ public boolean isNoHighlight() { * Get the no fields flag. * * @return the no fields flag - * @see Builder#noFields(boolean) + * @see Builder#noFields() */ public boolean isNoFields() { return noFields; @@ -466,7 +459,7 @@ public boolean isNoFields() { * Get the no frequency flag. * * @return the no frequency flag - * @see Builder#noFrequency(boolean) + * @see Builder#noFrequency() */ public boolean isNoFrequency() { return noFrequency; @@ -476,7 +469,7 @@ public boolean isNoFrequency() { * Get the skip initial scan flag. 
* * @return the skip initial scan flag - * @see Builder#skipInitialScan(boolean) + * @see Builder#skipInitialScan() */ public boolean isSkipInitialScan() { return skipInitialScan; diff --git a/src/main/java/io/lettuce/core/search/arguments/SearchArgs.java b/src/main/java/io/lettuce/core/search/arguments/SearchArgs.java index ac8ee74b2b..6400724b22 100644 --- a/src/main/java/io/lettuce/core/search/arguments/SearchArgs.java +++ b/src/main/java/io/lettuce/core/search/arguments/SearchArgs.java @@ -16,7 +16,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.OptionalLong; /** * Argument list builder for {@code FT.SEARCH}. @@ -30,23 +29,13 @@ @SuppressWarnings("OptionalUsedAsFieldOrParameterType") public class SearchArgs { - private Optional noContent = Optional.empty(); + private boolean noContent = false; - private Optional verbatim = Optional.empty(); + private boolean verbatim = false; - private Optional noStopWords = Optional.empty(); + private boolean withScores = false; - private Optional withScores = Optional.empty(); - - private Optional withPayloads = Optional.empty(); - - private Optional withSortKeys = Optional.empty(); - - // FIXME verify if we need to support this, deprecated since 2.10 - // private List> filters = new ArrayList<>(); - - // FIXME verify if we need to support this, deprecated since 2.6 - // private Optional> geoFilter = Optional.empty(); + private boolean withSortKeys = false; private final List inKeys = new ArrayList<>(); @@ -58,9 +47,9 @@ public class SearchArgs { private Optional> highlight = Optional.empty(); - private OptionalLong slop = OptionalLong.empty(); + private Long slop; - private Optional inOrder = Optional.empty(); + private boolean inOrder = false; private Optional language = Optional.empty(); @@ -68,11 +57,6 @@ public class SearchArgs { private Optional scorer = Optional.empty(); - // FIXME verify if we want to support this - // private Optional explainScore = Optional.empty(); - - private Optional payload = Optional.empty(); - private Optional> sortBy = Optional.empty(); private Optional limit = Optional.empty(); @@ -108,12 +92,24 @@ public static class Builder { private final SearchArgs instance = new SearchArgs<>(); + private SummarizeArgs.Builder summarizeArgs; + + private HighlightArgs.Builder highlightArgs; + /** * Build a new instance of the {@link SearchArgs}. * * @return a new instance of the {@link SearchArgs} */ public SearchArgs build() { + if (!instance.summarize.isPresent() && summarizeArgs != null) { + instance.summarize = Optional.of(summarizeArgs.build()); + } + + if (!instance.highlight.isPresent() && highlightArgs != null) { + instance.highlight = Optional.of(highlightArgs.build()); + } + return instance; } @@ -124,7 +120,7 @@ public SearchArgs build() { * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining */ public SearchArgs.Builder noContent() { - instance.noContent = Optional.of(true); + instance.noContent = true; return this; } @@ -134,17 +130,7 @@ public SearchArgs.Builder noContent() { * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining */ public SearchArgs.Builder verbatim() { - instance.verbatim = Optional.of(true); - return this; - } - - /** - * Ignore any defined stop words in full text searches. Disabled by default. 
- * - * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining - */ - public SearchArgs.Builder noStopWords() { - instance.noStopWords = Optional.of(true); + instance.verbatim = true; return this; } @@ -155,19 +141,7 @@ public SearchArgs.Builder noStopWords() { * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining */ public SearchArgs.Builder withScores() { - instance.withScores = Optional.of(true); - return this; - } - - /** - * Retrieve optional document payloads. The payloads follow the document id and, if - * {@link SearchArgs.Builder#withScores} is set, the scores. Disabled by default. - * - * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining - * @see FT.CREATE - */ - public SearchArgs.Builder withPayloads() { - instance.withPayloads = Optional.of(true); + instance.withScores = true; return this; } @@ -179,7 +153,7 @@ public SearchArgs.Builder withPayloads() { * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining */ public SearchArgs.Builder withSortKeys() { - instance.withSortKeys = Optional.of(true); + instance.withSortKeys = true; return this; } @@ -239,11 +213,95 @@ public SearchArgs.Builder returnField(K field) { * @see Highlighting */ - public SearchArgs.Builder summarize(SummarizeArgs summarizeFilter) { + public SearchArgs.Builder summarizeArgs(SummarizeArgs summarizeFilter) { instance.summarize = Optional.ofNullable(summarizeFilter); return this; } + /** + * Convenience method to build {@link SummarizeArgs} + *

+ * Add a field to summarize. Each field is summarized. If no FIELDS directive is passed, then all returned fields are + * summarized. + * + * @param field the field to add + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder summarizeField(K field) { + if (summarizeArgs == null) { + summarizeArgs = new SummarizeArgs.Builder<>(); + } + + summarizeArgs.field(field); + + return this; + } + + /** + * Convenience method to build {@link SummarizeArgs} + *

+ * Set the number of context words each fragment should contain. Context words surround the found term. A higher value + * will return a larger block of text. If not specified, the default value is 20. + * + * @param len the field to add + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder summarizeLen(long len) { + if (summarizeArgs == null) { + summarizeArgs = new SummarizeArgs.Builder<>(); + } + + summarizeArgs.len(len); + + return this; + } + + /** + * Convenience method to build {@link SummarizeArgs} + *

+ * The string used to divide individual summary snippets. The default is ... which is common among search + * engines, but you may override this with any other string if you desire to programmatically divide the snippets later + * on. You may also use a newline sequence, as newlines are stripped from the result body during processing. + * + * @param separator the separator between fragments + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder summarizeSeparator(V separator) { + if (summarizeArgs == null) { + summarizeArgs = new SummarizeArgs.Builder<>(); + } + + summarizeArgs.separator(separator); + + return this; + } + + /** + * Convenience method to build {@link SummarizeArgs} + *

+ * Set the number of fragments to be returned. If not specified, the default is 3. + * + * @param fragments the number of fragments to return + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder summarizeFragments(long fragments) { + if (summarizeArgs == null) { + summarizeArgs = new SummarizeArgs.Builder<>(); + } + + summarizeArgs.fragments(fragments); + + return this; + } + /** * Format occurrences of matched text. * @@ -252,11 +310,53 @@ public SearchArgs.Builder summarize(SummarizeArgs summarizeFilter) { * @see Highlighting */ - public SearchArgs.Builder highlight(HighlightArgs highlightFilter) { + public SearchArgs.Builder highlightArgs(HighlightArgs highlightFilter) { instance.highlight = Optional.ofNullable(highlightFilter); return this; } + /** + * Convenience method to build {@link HighlightArgs} + *

+ * Add a field to highlight. If no FIELDS directive is passed, then all returned fields are highlighted. + * + * @param field the field to summarize + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder highlightField(K field) { + if (highlightArgs == null) { + highlightArgs = new HighlightArgs.Builder<>(); + } + + highlightArgs.field(field); + + return this; + } + + /** + * Convenience method to build {@link HighlightArgs} + *

+ * Tags to surround the matched terms with. If no TAGS are specified, a built-in tag pair is prepended and appended to + * each matched term. + * + * @param startTag the string is prepended to each matched term + * @param endTag the string is appended to each matched term + * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining + * @see Highlighting + */ + public SearchArgs.Builder highlightTags(V startTag, V endTag) { + if (highlightArgs == null) { + highlightArgs = new HighlightArgs.Builder<>(); + } + + highlightArgs.tags(startTag, endTag); + + return this; + } + /** * Allow for a number of intermediate terms allowed to appear between the terms of the query. Suppose you're searching * for a phrase hello world, if some other terms appear in-between hello and @@ -267,7 +367,7 @@ public SearchArgs.Builder highlight(HighlightArgs highlightFilter) { * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining */ public SearchArgs.Builder slop(long slop) { - instance.slop = OptionalLong.of(slop); + instance.slop = slop; return this; } @@ -278,7 +378,7 @@ public SearchArgs.Builder slop(long slop) { * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining */ public SearchArgs.Builder inOrder() { - instance.inOrder = Optional.of(true); + instance.inOrder = true; return this; } @@ -323,30 +423,6 @@ public SearchArgs.Builder scorer(ScoringFunction scorer) { return this; } - // /** - // * Return a textual description of how the scores were calculated. Using this option requires - // * {@link Builder#withScores()}. - // * - // * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining - // */ - // public SearchArgs.Builder explainScore() { - // instance.explainScore = Optional.of(true); - // return this; - // } - - /** - * Add an arbitrary, binary safe payload exposed to custom scoring functions. - * - * @param payload the payload to return - * @return the instance of the current {@link SearchArgs.Builder} for the purpose of method chaining - * @see Extensions - */ - public SearchArgs.Builder payload(V payload) { - instance.payload = Optional.ofNullable(payload); - return this; - } - /** * Order the results by the value of this attribute. This applies to both text and numeric attributes. Attributes needed * for SORTBY should be declared as SORTABLE in the index, to be available with very low latency. @@ -424,7 +500,7 @@ public SearchArgs.Builder dialect(QueryDialects dialect) { * @return true if NOCONTENT is enabled, false otherwise */ public boolean isNoContent() { - return noContent.orElse(false); + return noContent; } /** @@ -433,16 +509,7 @@ public boolean isNoContent() { * @return true if WITHSCORES is enabled, false otherwise */ public boolean isWithScores() { - return withScores.orElse(false); - } - - /** - * Gets whether the WITHPAYLOADS option is enabled. 
- * - * @return true if WITHPAYLOADS is enabled, false otherwise - */ - public boolean isWithPayloads() { - return withPayloads.orElse(false); + return withScores; } /** @@ -451,7 +518,7 @@ public boolean isWithPayloads() { * @return true if WITHSORTKEYS is enabled, false otherwise */ public boolean isWithSortKeys() { - return withSortKeys.orElse(false); + return withSortKeys; } /** @@ -461,12 +528,21 @@ public boolean isWithSortKeys() { */ public void build(CommandArgs args) { - noContent.ifPresent(v -> args.add(CommandKeyword.NOCONTENT)); - verbatim.ifPresent(v -> args.add(CommandKeyword.VERBATIM)); - noStopWords.ifPresent(v -> args.add(CommandKeyword.NOSTOPWORDS)); - withScores.ifPresent(v -> args.add(CommandKeyword.WITHSCORES)); - withPayloads.ifPresent(v -> args.add(CommandKeyword.WITHPAYLOADS)); - withSortKeys.ifPresent(v -> args.add(CommandKeyword.WITHSORTKEYS)); + if (noContent) { + args.add(CommandKeyword.NOCONTENT); + } + + if (verbatim) { + args.add(CommandKeyword.VERBATIM); + } + + if (withScores) { + args.add(CommandKeyword.WITHSCORES); + } + + if (withSortKeys) { + args.add(CommandKeyword.WITHSORTKEYS); + } if (!inKeys.isEmpty()) { args.add(CommandKeyword.INKEYS); @@ -489,25 +565,22 @@ public void build(CommandArgs args) { }); } - summarize.ifPresent(summarizeArgs -> { - summarizeArgs.build(args); - }); - - highlight.ifPresent(highlightArgs -> { - highlightArgs.build(args); - }); + summarize.ifPresent(summarizeArgs -> summarizeArgs.build(args)); + highlight.ifPresent(highlightArgs -> highlightArgs.build(args)); - slop.ifPresent(v -> { + if (slop != null) { args.add(CommandKeyword.SLOP); - args.add(v); - }); + args.add(slop); + } timeout.ifPresent(timeoutDuration -> { args.add(CommandKeyword.TIMEOUT); args.add(timeoutDuration.toMillis()); }); - inOrder.ifPresent(v -> args.add(CommandKeyword.INORDER)); + if (inOrder) { + args.add(CommandKeyword.INORDER); + } language.ifPresent(documentLanguage -> { args.add(CommandKeyword.LANGUAGE); @@ -524,16 +597,7 @@ public void build(CommandArgs args) { args.add(scoringFunction.toString()); }); - // explainScore.ifPresent(v -> args.add(CommandKeyword.EXPLAINSCORE)); - - payload.ifPresent(v -> { - args.add(CommandKeyword.PAYLOAD); - args.addValue(v); - }); - - sortBy.ifPresent(sortByArgs -> { - sortByArgs.build(args); - }); + sortBy.ifPresent(sortByArgs -> sortByArgs.build(args)); limit.ifPresent(limitArgs -> { args.add(CommandKeyword.LIMIT); diff --git a/src/main/java/io/lettuce/core/search/arguments/VectorFieldArgs.java b/src/main/java/io/lettuce/core/search/arguments/VectorFieldArgs.java index 2bb208e217..5a1c7d551c 100644 --- a/src/main/java/io/lettuce/core/search/arguments/VectorFieldArgs.java +++ b/src/main/java/io/lettuce/core/search/arguments/VectorFieldArgs.java @@ -50,6 +50,14 @@ public enum Algorithm { * Vector data types. */ public enum VectorType { + /** + * 16-bit brain floating point. Requires RediSearch v2.10 or later. + */ + BFLOAT16, + /** + * 16-bit floating point. Requires RediSearch v2.10 or later. + */ + FLOAT16, /** * 32-bit floating point. 
*/ diff --git a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt index 385f610220..cb12e6d9f4 100644 --- a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt +++ b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt @@ -32,7 +32,7 @@ import io.lettuce.core.search.arguments.SynUpdateArgs * @author Tihomir Mateev * @see RediSearch * @since 6.8 - * @generated by io.lettuce.apigenerator.CreateKotlinCoroutinesApi + * @generated by io.lettuce.apigenerator.CreateKotlinCoroutinesApi */ @ExperimentalLettuceCoroutinesApi interface RediSearchCoroutinesCommands { @@ -626,10 +626,6 @@ interface RediSearchCoroutinesCommands { @Experimental suspend fun ftList(): List - - - - /** * Dump synonym group contents. * diff --git a/src/test/java/io/lettuce/apigenerator/CreateAsyncNodeSelectionClusterApi.java b/src/test/java/io/lettuce/apigenerator/CreateAsyncNodeSelectionClusterApi.java index 577c1b8f59..410fb78eaf 100644 --- a/src/test/java/io/lettuce/apigenerator/CreateAsyncNodeSelectionClusterApi.java +++ b/src/test/java/io/lettuce/apigenerator/CreateAsyncNodeSelectionClusterApi.java @@ -105,6 +105,9 @@ static List arguments() { private CompilationUnitFactory createFactory(String templateName) { String targetName = templateName.replace("Commands", "AsyncCommands").replace("Redis", "NodeSelection"); + if (!targetName.contains("NodeSelection")) { + targetName = targetName.replace("Redi", "NodeSelection"); + } File templateFile = new File(Constants.TEMPLATES, "io/lettuce/core/api/" + templateName + ".java"); String targetPackage = "io.lettuce.core.cluster.api.async"; diff --git a/src/test/java/io/lettuce/apigenerator/CreateSyncNodeSelectionClusterApi.java b/src/test/java/io/lettuce/apigenerator/CreateSyncNodeSelectionClusterApi.java index 6f051d2fc3..4756a99c6c 100644 --- a/src/test/java/io/lettuce/apigenerator/CreateSyncNodeSelectionClusterApi.java +++ b/src/test/java/io/lettuce/apigenerator/CreateSyncNodeSelectionClusterApi.java @@ -112,6 +112,9 @@ static List arguments() { private CompilationUnitFactory createFactory(String templateName) { String targetName = templateName.replace("Redis", "NodeSelection"); + if (targetName.equals(templateName)) { + targetName = templateName.replace("Redi", "NodeSelection"); + } File templateFile = new File(Constants.TEMPLATES, "io/lettuce/core/api/" + templateName + ".java"); String targetPackage = "io.lettuce.core.cluster.api.sync"; diff --git a/src/test/java/io/lettuce/core/RediSearchCommandBuilderUnitTests.java b/src/test/java/io/lettuce/core/RediSearchCommandBuilderUnitTests.java index 973d089155..d342bae4ec 100644 --- a/src/test/java/io/lettuce/core/RediSearchCommandBuilderUnitTests.java +++ b/src/test/java/io/lettuce/core/RediSearchCommandBuilderUnitTests.java @@ -78,7 +78,7 @@ void shouldCorrectlyConstructFtCreateCommandScenario1() { FieldArgs fieldArgs2 = NumericFieldArgs. builder().name(FIELD2_NAME).sortable().build(); FieldArgs fieldArgs3 = TagFieldArgs. builder().name(FIELD3_NAME).sortable().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(PREFIX) .on(CreateArgs.TargetType.HASH).build(); Command command = builder.ftCreate(MY_KEY, createArgs, Arrays.asList(fieldArgs1, fieldArgs2, fieldArgs3)); @@ -113,7 +113,7 @@ void shouldCorrectlyConstructFtCreateCommandScenario2() { FieldArgs fieldArgs1 = TextFieldArgs. 
builder().name(FIELD4_NAME).as(FIELD4_ALIAS1).build(); FieldArgs fieldArgs2 = TagFieldArgs. builder().name(FIELD4_NAME).as(FIELD4_ALIAS2).sortable().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(PREFIX) .on(CreateArgs.TargetType.HASH).build(); Command command = builder.ftCreate(MY_KEY, createArgs, Arrays.asList(fieldArgs1, fieldArgs2)); ByteBuf buf = Unpooled.directBuffer(); @@ -635,6 +635,39 @@ void shouldCorrectlyConstructFtAggregateCommandBasic() { assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); } + @Test + void shouldMaintainPipelineOperationOrder() { + // Test that pipeline operations (GROUPBY, SORTBY, APPLY, FILTER, LIMIT) + // are output in the order specified by the user, not in a fixed order + AggregateArgs aggregateArgs = AggregateArgs. builder()// + .apply("@price * @quantity", "total_value")// First operation + .filter("@total_value > 100")// Second operation + .groupBy(GroupBy. of("category").reduce(Reducer. count().as("count")))// Third + // operation + .limit(0, 5)// Fourth operation + .sortBy(SortBy.of("count", SortDirection.DESC))// Fifth operation + .build(); + + Command> command = builder.ftAggregate(MY_KEY, MY_QUERY, + aggregateArgs); + ByteBuf buf = Unpooled.directBuffer(); + command.encode(buf); + + // Expected order should match the user's call order: APPLY -> FILTER -> GROUPBY -> LIMIT -> SORTBY + String result = "*26\r\n" + "$12\r\n" + "FT.AGGREGATE\r\n" + "$3\r\n" + "idx\r\n" + "$1\r\n" + "*\r\n"// + + "$5\r\n" + "APPLY\r\n" + "$18\r\n" + "@price * @quantity\r\n" + "$2\r\n" + "AS\r\n" + "$11\r\n" + + "total_value\r\n"// + + "$6\r\n" + "FILTER\r\n" + "$18\r\n" + "@total_value > 100\r\n"// + + "$7\r\n" + "GROUPBY\r\n" + "$1\r\n" + "1\r\n" + "$9\r\n" + "@category\r\n"// + + "$6\r\n" + "REDUCE\r\n" + "$5\r\n" + "COUNT\r\n" + "$1\r\n" + "0\r\n" + "$2\r\n" + "AS\r\n" + "$5\r\n" + + "count\r\n"// + + "$5\r\n" + "LIMIT\r\n" + "$1\r\n" + "0\r\n" + "$1\r\n" + "5\r\n"// + + "$6\r\n" + "SORTBY\r\n" + "$1\r\n" + "2\r\n" + "$6\r\n" + "@count\r\n" + "$4\r\n" + "DESC\r\n"// + + "$7\r\n" + "DIALECT\r\n" + "$1\r\n2\r\n";// + + assertThat(buf.toString(StandardCharsets.UTF_8)).isEqualTo(result); + } + @Test void shouldCorrectlyConstructFtAggregateCommandWithArgs() { AggregateArgs aggregateArgs = AggregateArgs. 
builder()// diff --git a/src/test/java/io/lettuce/core/output/SpellCheckResultParserUnitTests.java b/src/test/java/io/lettuce/core/output/SpellCheckResultParserUnitTests.java index 7d194efcca..5b07b20251 100644 --- a/src/test/java/io/lettuce/core/output/SpellCheckResultParserUnitTests.java +++ b/src/test/java/io/lettuce/core/output/SpellCheckResultParserUnitTests.java @@ -8,7 +8,6 @@ import static io.lettuce.TestTags.UNIT_TEST; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; import io.lettuce.core.codec.StringCodec; import org.junit.jupiter.api.Tag; @@ -216,8 +215,7 @@ void shouldThrowExceptionForInvalidSuggestionFormat() { termArray.storeObject(suggestionsArray); data.storeObject(termArray); - assertThatThrownBy(() -> parser.parse(data)).isInstanceOf(IllegalArgumentException.class) - .hasMessage("Failed while parsing FT.SPELLCHECK: each suggestion must have 2 parts [score, suggestion]"); + assertThat(parser.parse(data).getMisspelledTerms().get(0).getSuggestions()).isEmpty(); } } diff --git a/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsIntegrationTests.java index 107b3dacfa..4c600c5af5 100644 --- a/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsIntegrationTests.java +++ b/src/test/java/io/lettuce/core/search/RediSearchAdvancedConceptsIntegrationTests.java @@ -109,7 +109,7 @@ void testStopWordsManagement() { FieldArgs titleField = TextFieldArgs. builder().name("title").build(); FieldArgs contentField = TextFieldArgs. builder().name("content").build(); - CreateArgs customStopWordsArgs = CreateArgs. builder().addPrefix(ARTICLE_PREFIX) + CreateArgs customStopWordsArgs = CreateArgs. builder().withPrefix(ARTICLE_PREFIX) .on(CreateArgs.TargetType.HASH).stopWords(Arrays.asList("foo", "bar", "baz")).build(); redis.ftCreate(STOPWORDS_INDEX, customStopWordsArgs, Arrays.asList(titleField, contentField)); @@ -157,7 +157,7 @@ void testTokenizationAndEscaping() { // Create index for testing tokenization FieldArgs textField = TextFieldArgs. builder().name("text").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(DOCUMENT_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(DOCUMENT_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(TOKENIZATION_INDEX, createArgs, Collections.singletonList(textField)); @@ -217,7 +217,7 @@ void testSortingByIndexedFields() { FieldArgs lastNameField = TextFieldArgs. builder().name("last_name").sortable().build(); FieldArgs ageField = NumericFieldArgs. builder().name("age").sortable().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(USER_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(USER_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(SORTING_INDEX, createArgs, Arrays.asList(firstNameField, lastNameField, ageField)); @@ -279,7 +279,7 @@ void testTagFieldOperations() { FieldArgs categoriesField = TagFieldArgs. builder().name("categories").separator(";").build(); FieldArgs tagsField = TagFieldArgs. builder().name("tags").caseSensitive().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(PRODUCT_PREFIX) + CreateArgs createArgs = CreateArgs. 
builder().withPrefix(PRODUCT_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(TAGS_INDEX, createArgs, Arrays.asList(titleField, categoriesField, tagsField)); @@ -352,7 +352,7 @@ void testHighlightingAndSummarization() { FieldArgs contentField = TextFieldArgs. builder().name("content").build(); FieldArgs authorField = TextFieldArgs. builder().name("author").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(BOOK_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(BOOK_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(HIGHLIGHT_INDEX, createArgs, Arrays.asList(titleField, contentField, authorField)); @@ -381,7 +381,7 @@ void testHighlightingAndSummarization() { // Test 1: Basic highlighting with default tags HighlightArgs basicHighlight = HighlightArgs. builder().build(); - SearchArgs highlightArgs = SearchArgs. builder().highlight(basicHighlight).build(); + SearchArgs highlightArgs = SearchArgs. builder().highlightArgs(basicHighlight).build(); SearchReply results = redis.ftSearch(HIGHLIGHT_INDEX, "Redis", highlightArgs); assertThat(results.getCount()).isEqualTo(1); @@ -391,10 +391,8 @@ void testHighlightingAndSummarization() { assertThat(highlightedContent).contains("Redis"); // Default highlighting tags // Test 2: Custom highlighting tags - HighlightArgs customHighlight = HighlightArgs. builder().field("title").field("content") - .tags("", "").build(); - SearchArgs customHighlightArgs = SearchArgs. builder().highlight(customHighlight) - .build(); + SearchArgs customHighlightArgs = SearchArgs. builder().highlightField("title") + .highlightField("content").highlightTags("", "").build(); results = redis.ftSearch(HIGHLIGHT_INDEX, "database", customHighlightArgs); assertThat(results.getCount()).isEqualTo(2); @@ -410,7 +408,7 @@ void testHighlightingAndSummarization() { // Test 3: Summarization with custom parameters SummarizeArgs summarize = SummarizeArgs. builder().field("content").fragments(2).len(25) .separator(" ... ").build(); - SearchArgs summarizeArgs = SearchArgs. builder().summarize(summarize).build(); + SearchArgs summarizeArgs = SearchArgs. builder().summarizeArgs(summarize).build(); results = redis.ftSearch(HIGHLIGHT_INDEX, "patterns", summarizeArgs); assertThat(results.getCount()).isEqualTo(1); @@ -423,10 +421,8 @@ void testHighlightingAndSummarization() { // Test 4: Combined highlighting and summarization HighlightArgs combineHighlight = HighlightArgs. builder().field("content") .tags("**", "**").build(); - SummarizeArgs combineSummarize = SummarizeArgs. builder().field("content").fragments(1) - .len(30).build(); - SearchArgs combinedArgs = SearchArgs. builder().highlight(combineHighlight) - .summarize(combineSummarize).build(); + SearchArgs combinedArgs = SearchArgs. builder().highlightArgs(combineHighlight) + .summarizeField("content").summarizeFragments(1).summarizeLen(30).build(); results = redis.ftSearch(HIGHLIGHT_INDEX, "Redis data", combinedArgs); assertThat(results.getCount()).isEqualTo(1); @@ -451,7 +447,7 @@ void testDocumentScoring() { TextFieldArgs contentField = TextFieldArgs. builder().name("content").build(); NumericFieldArgs ratingField = NumericFieldArgs. builder().name("rating").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(REVIEW_PREFIX) + CreateArgs createArgs = CreateArgs. 
builder().withPrefix(REVIEW_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(SCORING_INDEX, createArgs, Arrays.asList(titleField, contentField, ratingField)); @@ -531,7 +527,7 @@ void testStemmingAndLanguageSupport() { // Test 1: English stemming FieldArgs englishWordField = TextFieldArgs. builder().name("word").build(); - CreateArgs englishArgs = CreateArgs. builder().addPrefix(WORD_PREFIX) + CreateArgs englishArgs = CreateArgs. builder().withPrefix(WORD_PREFIX) .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); redis.ftCreate(STEMMING_INDEX, englishArgs, Collections.singletonList(englishWordField)); @@ -584,7 +580,7 @@ void testStemmingAndLanguageSupport() { // Test 2: German stemming example from documentation FieldArgs germanWordField = TextFieldArgs. builder().name("wort").build(); - CreateArgs germanArgs = CreateArgs. builder().addPrefix("wort:") + CreateArgs germanArgs = CreateArgs. builder().withPrefix("wort:") .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.GERMAN).build(); redis.ftCreate("idx:german", germanArgs, Collections.singletonList(germanWordField)); @@ -613,7 +609,7 @@ void testPhoneticMatchers() { FieldArgs englishNameField = TextFieldArgs. builder().name("name") .phonetic(TextFieldArgs.PhoneticMatcher.ENGLISH).build(); - CreateArgs englishArgs = CreateArgs. builder().addPrefix("person:") + CreateArgs englishArgs = CreateArgs. builder().withPrefix("person:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("phonetic-en-idx", englishArgs, Collections.singletonList(englishNameField)); @@ -639,7 +635,7 @@ void testPhoneticMatchers() { FieldArgs frenchNameField = TextFieldArgs. builder().name("nom") .phonetic(TextFieldArgs.PhoneticMatcher.FRENCH).build(); - CreateArgs frenchArgs = CreateArgs. builder().addPrefix("personne:") + CreateArgs frenchArgs = CreateArgs. builder().withPrefix("personne:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("phonetic-fr-idx", frenchArgs, Collections.singletonList(frenchNameField)); @@ -663,7 +659,7 @@ void testPhoneticMatchers() { FieldArgs spanishNameField = TextFieldArgs. builder().name("nombre") .phonetic(TextFieldArgs.PhoneticMatcher.SPANISH).build(); - CreateArgs spanishArgs = CreateArgs. builder().addPrefix("persona:") + CreateArgs spanishArgs = CreateArgs. builder().withPrefix("persona:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("phonetic-es-idx", spanishArgs, Collections.singletonList(spanishNameField)); @@ -684,7 +680,7 @@ void testPhoneticMatchers() { FieldArgs portugueseNameField = TextFieldArgs. builder().name("nome") .phonetic(TextFieldArgs.PhoneticMatcher.PORTUGUESE).build(); - CreateArgs portugueseArgs = CreateArgs. builder().addPrefix("pessoa:") + CreateArgs portugueseArgs = CreateArgs. builder().withPrefix("pessoa:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("phonetic-pt-idx", portugueseArgs, Collections.singletonList(portugueseNameField)); @@ -711,7 +707,7 @@ void testNoStemmingOption() { // Test 1: Field with stemming enabled (default) FieldArgs stemmingField = TextFieldArgs. builder().name("content_stemmed").build(); - CreateArgs stemmingArgs = CreateArgs. builder().addPrefix("stem:") + CreateArgs stemmingArgs = CreateArgs. 
builder().withPrefix("stem:") .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); redis.ftCreate("stemming-idx", stemmingArgs, Collections.singletonList(stemmingField)); @@ -730,7 +726,7 @@ void testNoStemmingOption() { // Test 2: Field with stemming disabled FieldArgs noStemmingField = TextFieldArgs. builder().name("content_exact").noStem().build(); - CreateArgs noStemmingArgs = CreateArgs. builder().addPrefix("nostem:") + CreateArgs noStemmingArgs = CreateArgs. builder().withPrefix("nostem:") .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); redis.ftCreate("nostemming-idx", noStemmingArgs, Collections.singletonList(noStemmingField)); @@ -759,7 +755,7 @@ void testNoStemmingOption() { FieldArgs mixedStemField = TextFieldArgs. builder().name("stemmed_content").build(); FieldArgs mixedNoStemField = TextFieldArgs. builder().name("exact_content").noStem().build(); - CreateArgs mixedArgs = CreateArgs. builder().addPrefix("mixed:") + CreateArgs mixedArgs = CreateArgs. builder().withPrefix("mixed:") .on(CreateArgs.TargetType.HASH).defaultLanguage(DocumentLanguage.ENGLISH).build(); redis.ftCreate("mixed-idx", mixedArgs, Arrays.asList(mixedStemField, mixedNoStemField)); @@ -794,7 +790,7 @@ void testWithSuffixTrieOption() { // Test 1: Field without suffix trie (default) FieldArgs normalField = TextFieldArgs. builder().name("title").build(); - CreateArgs normalArgs = CreateArgs. builder().addPrefix("normal:") + CreateArgs normalArgs = CreateArgs. builder().withPrefix("normal:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("normal-idx", normalArgs, Collections.singletonList(normalField)); @@ -814,7 +810,7 @@ void testWithSuffixTrieOption() { // Test 2: Field with suffix trie enabled FieldArgs suffixTrieField = TextFieldArgs. builder().name("title").withSuffixTrie().build(); - CreateArgs suffixTrieArgs = CreateArgs. builder().addPrefix("suffix:") + CreateArgs suffixTrieArgs = CreateArgs. builder().withPrefix("suffix:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("suffix-idx", suffixTrieArgs, Collections.singletonList(suffixTrieField)); @@ -848,7 +844,7 @@ void testWithSuffixTrieOption() { // Test 3: Autocomplete-style functionality with suffix trie FieldArgs autocompleteField = TextFieldArgs. builder().name("product_name").withSuffixTrie().build(); - CreateArgs autocompleteArgs = CreateArgs. builder().addPrefix("product:") + CreateArgs autocompleteArgs = CreateArgs. builder().withPrefix("product:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("autocomplete-idx", autocompleteArgs, Collections.singletonList(autocompleteField)); @@ -886,7 +882,7 @@ void testWithSuffixTrieOption() { // Test 4: Performance comparison - complex wildcard queries FieldArgs performanceField = TextFieldArgs. builder().name("description").withSuffixTrie().build(); - CreateArgs performanceArgs = CreateArgs. builder().addPrefix("perf:") + CreateArgs performanceArgs = CreateArgs. 
builder().withPrefix("perf:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("performance-idx", performanceArgs, Collections.singletonList(performanceField)); diff --git a/src/test/java/io/lettuce/core/search/RediSearchAggregateIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchAggregateIntegrationTests.java index e0876a398b..fe9d712756 100644 --- a/src/test/java/io/lettuce/core/search/RediSearchAggregateIntegrationTests.java +++ b/src/test/java/io/lettuce/core/search/RediSearchAggregateIntegrationTests.java @@ -35,6 +35,7 @@ import io.lettuce.core.search.arguments.CreateArgs; import io.lettuce.core.search.arguments.FieldArgs; import io.lettuce.core.search.arguments.NumericFieldArgs; +import io.lettuce.core.search.arguments.TagFieldArgs; import io.lettuce.core.search.arguments.TextFieldArgs; /** @@ -72,7 +73,7 @@ void shouldPerformBasicAggregation() { List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), TextFieldArgs. builder().name("category").build()); - CreateArgs createArgs = CreateArgs. builder().addPrefix("doc:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("doc:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate("basic-test-idx", createArgs, fields)).isEqualTo("OK"); @@ -505,7 +506,7 @@ TextFieldArgs. builder().name("product").build(), NumericFieldArgs. builder().name("sales").sortable().build(), NumericFieldArgs. builder().name("profit").sortable().build()); - CreateArgs createArgs = CreateArgs. builder().addPrefix("sales:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("sales:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate("sales-idx", createArgs, fields)).isEqualTo("OK"); @@ -582,7 +583,7 @@ TextFieldArgs. builder().name("priority").sortable().build(), NumericFieldArgs. builder().name("score").sortable().build(), NumericFieldArgs. builder().name("age").sortable().build()); - CreateArgs createArgs = CreateArgs. builder().addPrefix("task:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("task:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate("tasks-idx", createArgs, fields)).isEqualTo("OK"); @@ -652,7 +653,7 @@ NumericFieldArgs. builder().name("temperature").sortable().build(), NumericFieldArgs. builder().name("humidity").sortable().build(), NumericFieldArgs. builder().name("pressure").sortable().build()); - CreateArgs createArgs = CreateArgs. builder().addPrefix("weather:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("weather:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate("weather-idx", createArgs, fields)).isEqualTo("OK"); @@ -1749,4 +1750,560 @@ NumericFieldArgs. builder().name("assists").sortable().build(), assertThat(redis.ftDropindex("sortby-multi-test-idx")).isEqualTo("OK"); } + @Test + void shouldRespectUserSpecifiedPipelineOperationOrder() { + // Create an index for testing pipeline operation order + List> fields = Arrays.asList(TextFieldArgs. builder().name("title").build(), + NumericFieldArgs. builder().name("price").sortable().build(), + NumericFieldArgs. builder().name("quantity").sortable().build(), + TagFieldArgs. 
builder().name("category").sortable().build()); + + assertThat(redis.ftCreate("pipeline-order-test-idx", fields)).isEqualTo("OK"); + + // Add test documents + Map product1 = new HashMap<>(); + product1.put("title", "Product A"); + product1.put("price", "100"); + product1.put("quantity", "5"); + product1.put("category", "electronics"); + assertThat(redis.hmset("product:1", product1)).isEqualTo("OK"); + + Map product2 = new HashMap<>(); + product2.put("title", "Product B"); + product2.put("price", "200"); + product2.put("quantity", "3"); + product2.put("category", "electronics"); + assertThat(redis.hmset("product:2", product2)).isEqualTo("OK"); + + Map product3 = new HashMap<>(); + product3.put("title", "Product C"); + product3.put("price", "50"); + product3.put("quantity", "10"); + product3.put("category", "books"); + assertThat(redis.hmset("product:3", product3)).isEqualTo("OK"); + + // Test that operations are applied in user-specified order + // This specific order: APPLY -> FILTER -> GROUPBY -> LIMIT -> SORTBY + // should work correctly and produce meaningful results + AggregateArgs args = AggregateArgs. builder().load("title").load("price") + .load("quantity").load("category").apply("@price * @quantity", "total_value") // Calculate total + // value first + .filter("@total_value > 550") // Filter by total value (should keep only products 1 and 2, both electronics) + .groupBy(GroupBy. of("category").reduce(Reducer. count().as("product_count")) + .reduce(Reducer. sum("@total_value").as("category_total"))) + .limit(0, 10) // Limit results + .sortBy("category_total", SortDirection.DESC) // Sort by category total + .build(); + + AggregationReply result = redis.ftAggregate("pipeline-order-test-idx", "*", args); + + assertThat(result).isNotNull(); + assertThat(result.getReplies()).hasSize(1); + SearchReply searchReply = result.getReplies().get(0); + + // Should have only electronics category since books total_value (50*10=500) < 550 + // but electronics products (100*5=500, 200*3=600) both > 550 + assertThat(searchReply.getResults()).hasSize(1); + + SearchReply.SearchResult electronicsGroup = searchReply.getResults().get(0); + assertThat(electronicsGroup.getFields().get("category")).isEqualTo("electronics"); + assertThat(electronicsGroup.getFields().get("product_count")).isEqualTo("1"); + assertThat(electronicsGroup.getFields().get("category_total")).isEqualTo("600"); + } + + @Test + void shouldSupportDynamicReentrantPipeline() { + // Test the dynamic and re-entrant nature of aggregation pipelines + // Example from Redis docs: group by property X, sort top 100 by group size, + // then group by property Y and sort by some other property + + List> fields = Arrays.asList(TextFieldArgs. builder().name("product_name").build(), + TagFieldArgs. builder().name("category").sortable().build(), + TagFieldArgs. builder().name("brand").sortable().build(), + NumericFieldArgs. builder().name("price").sortable().build(), + NumericFieldArgs. builder().name("rating").sortable().build(), + NumericFieldArgs. 
builder().name("sales_count").sortable().build()); + + assertThat(redis.ftCreate("reentrant-pipeline-idx", fields)).isEqualTo("OK"); + + // Add diverse test data + String[][] products = { { "laptop:1", "Gaming Laptop", "electronics", "BrandA", "1200", "4.5", "150" }, + { "laptop:2", "Business Laptop", "electronics", "BrandB", "800", "4.2", "200" }, + { "laptop:3", "Budget Laptop", "electronics", "BrandA", "400", "3.8", "300" }, + { "phone:1", "Flagship Phone", "electronics", "BrandC", "900", "4.7", "500" }, + { "phone:2", "Mid-range Phone", "electronics", "BrandC", "500", "4.1", "400" }, + { "book:1", "Programming Book", "books", "PublisherA", "50", "4.6", "100" }, + { "book:2", "Design Book", "books", "PublisherB", "40", "4.3", "80" }, + { "book:3", "Business Book", "books", "PublisherA", "35", "4.0", "120" } }; + + for (String[] product : products) { + Map doc = new HashMap<>(); + doc.put("product_name", product[1]); + doc.put("category", product[2]); + doc.put("brand", product[3]); + doc.put("price", product[4]); + doc.put("rating", product[5]); + doc.put("sales_count", product[6]); + assertThat(redis.hmset(product[0], doc)).isEqualTo("OK"); + } + + // Complex re-entrant pipeline: + // 1. First grouping by category with multiple reducers + // 2. Apply transformation on group results + // 3. Filter based on computed values + // 4. Second grouping by a computed field + // 5. Sort by different criteria + // 6. Apply another transformation + // 7. Final filtering and limiting + AggregateArgs complexArgs = AggregateArgs. builder().load("category").load("brand") + .load("price").load("rating").load("sales_count") + // First aggregation: group by category + .groupBy(GroupBy. of("category").reduce(Reducer. count().as("product_count")) + .reduce(Reducer. avg("@price").as("avg_price")) + .reduce(Reducer. sum("@sales_count").as("total_sales")) + .reduce(Reducer. avg("@rating").as("avg_rating"))) + // Apply transformation to create performance score + .apply("@avg_rating * @total_sales / 100", "performance_score") + // Filter categories with good performance + .filter("@performance_score > 15") + // Sort by performance score to get top categories + .sortBy("performance_score", SortDirection.DESC) + // Limit to top performing categories + .limit(0, 2) + // Apply another transformation for price tier calculation + .apply("@avg_price / 100", "price_tier").build(); + + AggregationReply result = redis.ftAggregate("reentrant-pipeline-idx", "*", complexArgs); + + assertThat(result).isNotNull(); + assertThat(result.getReplies()).hasSize(1); + SearchReply searchReply = result.getReplies().get(0); + + // Should have results (electronics should be top performer) + assertThat(searchReply.getResults()).isNotEmpty(); + + // Verify the pipeline operations were applied in correct order + SearchReply.SearchResult topCategory = searchReply.getResults().get(0); + assertThat(topCategory.getFields()).containsKey("category"); + assertThat(topCategory.getFields()).containsKey("performance_score"); + assertThat(topCategory.getFields()).containsKey("price_tier"); + + // Electronics should be the top performer + assertThat(topCategory.getFields().get("category")).isEqualTo("electronics"); + } + + @Test + void shouldSupportMultipleRepeatedOperations() { + // Test that operations can be repeated multiple times in the pipeline + // This demonstrates the re-entrant nature where each operation can appear multiple times + + List> fields = Arrays.asList(TextFieldArgs. builder().name("employee_name").build(), + TagFieldArgs. 
builder().name("department").sortable().build(), + TagFieldArgs. builder().name("level").sortable().build(), + NumericFieldArgs. builder().name("salary").sortable().build(), + NumericFieldArgs. builder().name("experience").sortable().build(), + NumericFieldArgs. builder().name("performance_score").sortable().build()); + + assertThat(redis.ftCreate("repeated-ops-idx", fields)).isEqualTo("OK"); + + // Add employee data + String[][] employees = { { "emp:1", "Alice Johnson", "engineering", "senior", "120000", "8", "92" }, + { "emp:2", "Bob Smith", "engineering", "junior", "80000", "3", "85" }, + { "emp:3", "Carol Davis", "engineering", "mid", "100000", "5", "88" }, + { "emp:4", "David Wilson", "sales", "senior", "110000", "7", "90" }, + { "emp:5", "Eve Brown", "sales", "junior", "70000", "2", "82" }, + { "emp:6", "Frank Miller", "marketing", "mid", "90000", "4", "87" }, + { "emp:7", "Grace Lee", "marketing", "senior", "105000", "6", "91" } }; + + for (String[] emp : employees) { + Map doc = new HashMap<>(); + doc.put("employee_name", emp[1]); + doc.put("department", emp[2]); + doc.put("level", emp[3]); + doc.put("salary", emp[4]); + doc.put("experience", emp[5]); + doc.put("performance_score", emp[6]); + assertThat(redis.hmset(emp[0], doc)).isEqualTo("OK"); + } + + // Pipeline with repeated operations demonstrating re-entrant nature: + // Multiple APPLY operations, multiple FILTER operations, multiple GROUPBY operations + AggregateArgs repeatedOpsArgs = AggregateArgs. builder().load("department") + .load("level").load("salary").load("experience").load("performance_score") + // First APPLY: Calculate salary per experience year + .apply("@salary / @experience", "salary_per_year") + // First FILTER: Filter experienced employees + .filter("@experience >= 3") + // Second APPLY: Calculate performance bonus + .apply("@performance_score * 1000", "performance_bonus") + // First GROUPBY: Group by department + .groupBy(GroupBy. of("department").reduce(Reducer. count().as("employee_count")) + .reduce(Reducer. avg("@salary").as("avg_salary")) + .reduce(Reducer. avg("@performance_score").as("avg_performance"))) + // Third APPLY: Calculate department efficiency + .apply("@avg_performance / (@avg_salary / 1000)", "efficiency_ratio") + // Second FILTER: Filter efficient departments + .filter("@efficiency_ratio > 0.8") + // First SORTBY: Sort by efficiency + .sortBy("efficiency_ratio", SortDirection.DESC) + // Fourth APPLY: Calculate performance score + .apply("@efficiency_ratio * 100", "performance_score") + // Second GROUPBY: Re-group by efficiency level (using rounded efficiency ratio) + .groupBy(GroupBy. of("efficiency_ratio") + .reduce(Reducer. count().as("dept_count")) + .reduce(Reducer. 
avg("@avg_salary").as("class_avg_salary"))) + // Second SORTBY: Sort by class average salary + .sortBy("class_avg_salary", SortDirection.DESC) + // Third FILTER: Final filter + .filter("@dept_count > 0").build(); + + AggregationReply result = redis.ftAggregate("repeated-ops-idx", "*", repeatedOpsArgs); + + assertThat(result).isNotNull(); + assertThat(result.getReplies()).hasSize(1); + SearchReply searchReply = result.getReplies().get(0); + + // Should have results showing performance classes + assertThat(searchReply.getResults()).isNotEmpty(); + + // Verify the repeated operations worked correctly + for (SearchReply.SearchResult efficiencyGroup : searchReply.getResults()) { + assertThat(efficiencyGroup.getFields()).containsKey("efficiency_ratio"); + assertThat(efficiencyGroup.getFields()).containsKey("dept_count"); + assertThat(efficiencyGroup.getFields()).containsKey("class_avg_salary"); + + // Verify efficiency ratio is a positive number + double efficiencyRatio = Double.parseDouble(efficiencyGroup.getFields().get("efficiency_ratio")); + assertThat(efficiencyRatio).isGreaterThan(0.0); + } + } + + @Test + void shouldSupportComplexPipelineWithInterleavedOperations() { + // Test complex interleaving of operations as mentioned in Redis docs: + // "group by property X, sort the top 100 results by group size, + // then group by property Y and sort the results by some other property" + + List> fields = Arrays.asList(TextFieldArgs. builder().name("transaction_id").build(), + TagFieldArgs. builder().name("customer_segment").sortable().build(), + TagFieldArgs. builder().name("product_category").sortable().build(), + TagFieldArgs. builder().name("region").sortable().build(), + NumericFieldArgs. builder().name("amount").sortable().build(), + NumericFieldArgs. builder().name("quantity").sortable().build(), + NumericFieldArgs. builder().name("discount").sortable().build()); + + assertThat(redis.ftCreate("interleaved-ops-idx", fields)).isEqualTo("OK"); + + // Add transaction data representing different customer segments, regions, and categories + String[][] transactions = { { "txn:1", "T001", "premium", "electronics", "north", "1500", "2", "5" }, + { "txn:2", "T002", "premium", "electronics", "south", "1200", "1", "10" }, + { "txn:3", "T003", "standard", "electronics", "north", "800", "3", "0" }, + { "txn:4", "T004", "standard", "books", "east", "150", "5", "15" }, + { "txn:5", "T005", "budget", "books", "west", "80", "8", "20" }, + { "txn:6", "T006", "premium", "clothing", "north", "600", "4", "8" }, + { "txn:7", "T007", "standard", "clothing", "south", "300", "6", "12" }, + { "txn:8", "T008", "budget", "electronics", "east", "400", "2", "25" }, + { "txn:9", "T009", "premium", "books", "west", "200", "10", "5" }, + { "txn:10", "T010", "standard", "electronics", "north", "900", "1", "7" } }; + + for (String[] txn : transactions) { + Map doc = new HashMap<>(); + doc.put("transaction_id", txn[1]); + doc.put("customer_segment", txn[2]); + doc.put("product_category", txn[3]); + doc.put("region", txn[4]); + doc.put("amount", txn[5]); + doc.put("quantity", txn[6]); + doc.put("discount", txn[7]); + assertThat(redis.hmset(txn[0], doc)).isEqualTo("OK"); + } + + // Complex interleaved pipeline demonstrating the Redis docs example: + AggregateArgs interleavedArgs = AggregateArgs. 
builder().load("customer_segment") + .load("product_category").load("region").load("amount").load("quantity").load("discount") + // Calculate net amount after discount + .apply("@amount * (100 - @discount) / 100", "net_amount") + // First grouping: Group by customer_segment (property X) + .groupBy(GroupBy. of("customer_segment") + .reduce(Reducer. count().as("segment_transactions")) + .reduce(Reducer. sum("@net_amount").as("segment_revenue")) + .reduce(Reducer. avg("@quantity").as("avg_quantity"))) + // Apply transformation to calculate revenue per transaction + .apply("@segment_revenue / @segment_transactions", "revenue_per_transaction") + // Sort by group size (segment_transactions) and limit to top results + .sortBy("segment_transactions", SortDirection.DESC).limit(0, 10) // Top 10 segments by transaction count + // Filter segments with significant revenue + .filter("@segment_revenue > 500") + // Apply value score calculation + .apply("@revenue_per_transaction / 100", "value_score") + // Second grouping: Group by value_score (property Y) + .groupBy(GroupBy. of("value_score").reduce(Reducer. count().as("tier_count")) + .reduce(Reducer. sum("@segment_revenue").as("tier_total_revenue")) + .reduce(Reducer. avg("@revenue_per_transaction").as("tier_avg_revenue"))) + // Sort by different property (tier_total_revenue) + .sortBy("tier_total_revenue", SortDirection.DESC) + // Final transformation and filtering + .apply("@tier_total_revenue / @tier_count", "revenue_efficiency").filter("@tier_count > 0").build(); + + AggregationReply result = redis.ftAggregate("interleaved-ops-idx", "*", interleavedArgs); + + assertThat(result).isNotNull(); + assertThat(result.getReplies()).hasSize(1); + SearchReply searchReply = result.getReplies().get(0); + + // Should have results showing value tiers + assertThat(searchReply.getResults()).isNotEmpty(); + + // Verify the complex interleaved operations worked correctly + for (SearchReply.SearchResult valueGroup : searchReply.getResults()) { + assertThat(valueGroup.getFields()).containsKey("value_score"); + assertThat(valueGroup.getFields()).containsKey("tier_count"); + assertThat(valueGroup.getFields()).containsKey("tier_total_revenue"); + assertThat(valueGroup.getFields()).containsKey("revenue_efficiency"); + + // Verify value score is a positive number + double valueScore = Double.parseDouble(valueGroup.getFields().get("value_score")); + assertThat(valueScore).isGreaterThan(0.0); + } + } + + @Test + void shouldSupportPipelineWithMultipleFiltersAndSorts() { + // Test pipeline with multiple FILTER and SORTBY operations at different stages + // This demonstrates that operations can be repeated and applied at various pipeline stages + + List> fields = Arrays.asList(TextFieldArgs. builder().name("product_id").build(), + TagFieldArgs. builder().name("category").sortable().build(), + TagFieldArgs. builder().name("brand").sortable().build(), + NumericFieldArgs. builder().name("price").sortable().build(), + NumericFieldArgs. builder().name("stock").sortable().build(), + NumericFieldArgs. builder().name("rating").sortable().build(), + NumericFieldArgs. 
builder().name("reviews_count").sortable().build()); + + assertThat(redis.ftCreate("multi-filter-sort-idx", fields)).isEqualTo("OK"); + + // Add product inventory data + String[][] products = { { "prod:1", "P001", "electronics", "BrandA", "299", "50", "4.2", "120" }, + { "prod:2", "P002", "electronics", "BrandB", "199", "30", "3.8", "85" }, + { "prod:3", "P003", "electronics", "BrandA", "399", "20", "4.5", "200" }, + { "prod:4", "P004", "books", "PublisherX", "25", "100", "4.1", "45" }, + { "prod:5", "P005", "books", "PublisherY", "35", "75", "4.3", "60" }, + { "prod:6", "P006", "clothing", "BrandC", "89", "40", "3.9", "30" }, + { "prod:7", "P007", "clothing", "BrandD", "129", "25", "4.0", "55" }, + { "prod:8", "P008", "electronics", "BrandB", "599", "15", "4.7", "300" }, + { "prod:9", "P009", "books", "PublisherX", "45", "60", "4.4", "80" }, + { "prod:10", "P010", "clothing", "BrandC", "159", "35", "4.2", "70" } }; + + for (String[] prod : products) { + Map doc = new HashMap<>(); + doc.put("product_id", prod[1]); + doc.put("category", prod[2]); + doc.put("brand", prod[3]); + doc.put("price", prod[4]); + doc.put("stock", prod[5]); + doc.put("rating", prod[6]); + doc.put("reviews_count", prod[7]); + assertThat(redis.hmset(prod[0], doc)).isEqualTo("OK"); + } + + // Pipeline with multiple filters and sorts at different stages: + AggregateArgs multiFilterSortArgs = AggregateArgs. builder().load("category") + .load("brand").load("price").load("stock").load("rating").load("reviews_count") + // First filter: Only products with decent ratings + .filter("@rating >= 4.0") + // Calculate popularity score + .apply("@rating * @reviews_count", "popularity_score") + // Second filter: Only popular products + .filter("@popularity_score > 200") + // First sort: Sort by popularity + .sortBy("popularity_score", SortDirection.DESC) + // Calculate inventory value + .apply("@price * @stock", "inventory_value") + // Group by category to analyze category performance + .groupBy(GroupBy. of("category").reduce(Reducer. count().as("product_count")) + .reduce(Reducer. sum("@inventory_value").as("total_inventory_value")) + .reduce(Reducer. avg("@popularity_score").as("avg_popularity")) + .reduce(Reducer. max("@price").as("max_price"))) + // Third filter: Categories with significant inventory + .filter("@total_inventory_value > 5000") + // Calculate value density + .apply("@total_inventory_value / @product_count", "value_density") + // Second sort: Sort by value density + .sortBy("value_density", SortDirection.DESC) + // Fourth filter: High-value categories only + .filter("@value_density > 1000") + // Apply final score calculation + .apply("@avg_popularity / 100", "category_score") + // Group by score for final analysis + .groupBy(GroupBy. of("category_score") + .reduce(Reducer. count().as("tier_category_count")) + .reduce(Reducer. sum("@total_inventory_value").as("tier_inventory_value")) + .reduce(Reducer. 
avg("@max_price").as("tier_avg_max_price"))) + // Third sort: Final sort by tier inventory value + .sortBy("tier_inventory_value", SortDirection.DESC) + // Fifth filter: Final filter for meaningful tiers + .filter("@tier_category_count > 0").limit(0, 5).build(); + + AggregationReply result = redis.ftAggregate("multi-filter-sort-idx", "*", multiFilterSortArgs); + + assertThat(result).isNotNull(); + assertThat(result.getReplies()).hasSize(1); + SearchReply searchReply = result.getReplies().get(0); + + // Should have results showing category tiers + assertThat(searchReply.getResults()).isNotEmpty(); + + // Verify the multiple filters and sorts worked correctly + for (SearchReply.SearchResult categoryGroup : searchReply.getResults()) { + assertThat(categoryGroup.getFields()).containsKey("category_score"); + assertThat(categoryGroup.getFields()).containsKey("tier_category_count"); + assertThat(categoryGroup.getFields()).containsKey("tier_inventory_value"); + assertThat(categoryGroup.getFields()).containsKey("tier_avg_max_price"); + + // Verify category score is a positive number + double categoryScore = Double.parseDouble(categoryGroup.getFields().get("category_score")); + assertThat(categoryScore).isGreaterThan(0.0); + + // Verify that filters were applied correctly (positive values) + int categoryCount = Integer.parseInt(categoryGroup.getFields().get("tier_category_count")); + assertThat(categoryCount).isGreaterThan(0); + } + } + + @Test + void shouldSupportAdvancedDynamicPipelineWithConditionalLogic() { + // Test the most advanced scenario: dynamic pipeline with conditional logic, + // multiple re-entrant operations, and complex transformations that build upon each other + // This represents a real-world business intelligence scenario + + List> fields = Arrays.asList(TextFieldArgs. builder().name("order_id").build(), + TagFieldArgs. builder().name("customer_type").sortable().build(), + TagFieldArgs. builder().name("product_line").sortable().build(), + TagFieldArgs. builder().name("sales_channel").sortable().build(), + TagFieldArgs. builder().name("season").sortable().build(), + NumericFieldArgs. builder().name("order_value").sortable().build(), + NumericFieldArgs. builder().name("cost").sortable().build(), + NumericFieldArgs. builder().name("shipping_cost").sortable().build(), + NumericFieldArgs. 
builder().name("customer_satisfaction").sortable().build()); + + assertThat(redis.ftCreate("advanced-pipeline-idx", fields)).isEqualTo("OK"); + + // Add comprehensive business data + String[][] orders = { { "order:1", "O001", "enterprise", "software", "online", "spring", "15000", "8000", "200", "9" }, + { "order:2", "O002", "smb", "software", "direct", "spring", "5000", "2500", "100", "8" }, + { "order:3", "O003", "individual", "hardware", "online", "summer", "800", "500", "50", "7" }, + { "order:4", "O004", "enterprise", "hardware", "partner", "summer", "25000", "15000", "500", "9" }, + { "order:5", "O005", "smb", "services", "direct", "fall", "3000", "1800", "0", "8" }, + { "order:6", "O006", "individual", "software", "online", "fall", "200", "100", "25", "6" }, + { "order:7", "O007", "enterprise", "services", "partner", "winter", "12000", "7000", "300", "9" }, + { "order:8", "O008", "smb", "hardware", "online", "winter", "2000", "1200", "75", "7" }, + { "order:9", "O009", "individual", "services", "direct", "spring", "500", "300", "30", "8" }, + { "order:10", "O010", "enterprise", "software", "online", "summer", "18000", "10000", "250", "9" } }; + + for (String[] order : orders) { + Map doc = new HashMap<>(); + doc.put("order_id", order[1]); + doc.put("customer_type", order[2]); + doc.put("product_line", order[3]); + doc.put("sales_channel", order[4]); + doc.put("season", order[5]); + doc.put("order_value", order[6]); + doc.put("cost", order[7]); + doc.put("shipping_cost", order[8]); + doc.put("customer_satisfaction", order[9]); + assertThat(redis.hmset(order[0], doc)).isEqualTo("OK"); + } + + // Advanced dynamic pipeline with conditional logic and multiple re-entrant operations: + AggregateArgs advancedArgs = AggregateArgs. builder().load("customer_type") + .load("product_line").load("sales_channel").load("season").load("order_value").load("cost") + .load("shipping_cost").load("customer_satisfaction") + + // Stage 1: Calculate basic business metrics + .apply("@order_value - @cost - @shipping_cost", "profit").apply("@profit / @order_value * 100", "profit_margin") + + // Stage 2: Filter profitable orders only + .filter("@profit > 0") + + // Stage 3: Calculate customer value score + .apply("@order_value / 1000", "customer_value_score") + + // Stage 4: First aggregation - group by customer type + .groupBy(GroupBy. of("customer_type") + .reduce(Reducer. count().as("segment_orders")) + .reduce(Reducer. sum("@profit").as("segment_profit")) + .reduce(Reducer. avg("@profit_margin").as("avg_margin")) + .reduce(Reducer. avg("@customer_satisfaction").as("avg_satisfaction"))) + + // Stage 5: Calculate segment performance score + .apply("(@avg_satisfaction * @avg_margin * @segment_orders) / 100", "performance_score") + + // Stage 6: Filter segments with any performance + .filter("@performance_score > 0") + + // Stage 7: Sort by performance and limit to top segments + .sortBy("performance_score", SortDirection.DESC).limit(0, 5) + + // Stage 8: Calculate normalized performance + .apply("@performance_score / 10", "normalized_performance") + + // Stage 9: Calculate business impact metrics + .apply("@segment_profit / @segment_orders", "profit_per_order") + .apply("@profit_per_order / 1000", "business_impact_score") + + // Stage 10: Second aggregation - re-group by business impact score + .groupBy(GroupBy. of("business_impact_score") + .reduce(Reducer. count().as("impact_segment_count")) + .reduce(Reducer. sum("@segment_profit").as("total_impact_profit")) + .reduce(Reducer. 
avg("@performance_score").as("avg_impact_performance")) + .reduce(Reducer. max("@avg_satisfaction").as("max_satisfaction"))) + + // Stage 11: Calculate final business metrics + .apply("@total_impact_profit / @impact_segment_count", "profit_efficiency") + .apply("(@avg_impact_performance + @max_satisfaction * 10) / 2", "composite_score") + + // Stage 12: Final filtering and sorting + .filter("@composite_score > 0").sortBy("composite_score", SortDirection.DESC) + + // Stage 13: Final strategic score calculation + .apply("@composite_score / 50", "strategic_score") + + .build(); + + AggregationReply result = redis.ftAggregate("advanced-pipeline-idx", "*", advancedArgs); + + assertThat(result).isNotNull(); + assertThat(result.getReplies()).hasSize(1); + SearchReply searchReply = result.getReplies().get(0); + + // Should have results showing business impact analysis + assertThat(searchReply.getResults()).isNotEmpty(); + + // Verify the advanced dynamic pipeline worked correctly + for (SearchReply.SearchResult impactGroup : searchReply.getResults()) { + // Verify all computed fields are present + assertThat(impactGroup.getFields()).containsKey("business_impact_score"); + assertThat(impactGroup.getFields()).containsKey("impact_segment_count"); + assertThat(impactGroup.getFields()).containsKey("total_impact_profit"); + assertThat(impactGroup.getFields()).containsKey("profit_efficiency"); + assertThat(impactGroup.getFields()).containsKey("composite_score"); + assertThat(impactGroup.getFields()).containsKey("strategic_score"); + + // Verify business impact score is a positive number + double impactScore = Double.parseDouble(impactGroup.getFields().get("business_impact_score")); + assertThat(impactScore).isGreaterThan(0.0); + + // Verify strategic score is a positive number + double strategicScore = Double.parseDouble(impactGroup.getFields().get("strategic_score")); + assertThat(strategicScore).isGreaterThan(0.0); + + // Verify that all metrics are positive (filters worked correctly) + double compositeScore = Double.parseDouble(impactGroup.getFields().get("composite_score")); + assertThat(compositeScore).isGreaterThan(0.0); + + int segmentCount = Integer.parseInt(impactGroup.getFields().get("impact_segment_count")); + assertThat(segmentCount).isGreaterThan(0); + } + } + } diff --git a/src/test/java/io/lettuce/core/search/RediSearchClusterIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchClusterIntegrationTests.java new file mode 100644 index 0000000000..e415f8be95 --- /dev/null +++ b/src/test/java/io/lettuce/core/search/RediSearchClusterIntegrationTests.java @@ -0,0 +1,251 @@ +/* + * Copyright 2024-2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. 
+ */ + +package io.lettuce.core.search; + +import io.lettuce.core.RedisURI; +import io.lettuce.core.cluster.RedisClusterClient; +import io.lettuce.core.cluster.SlotHash; +import io.lettuce.core.cluster.api.sync.RedisAdvancedClusterCommands; +import io.lettuce.core.search.arguments.AggregateArgs; +import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.FieldArgs; +import io.lettuce.core.search.arguments.SearchArgs; +import io.lettuce.core.search.arguments.NumericFieldArgs; +import io.lettuce.core.search.arguments.TagFieldArgs; +import io.lettuce.core.search.arguments.TextFieldArgs; +import io.lettuce.test.condition.RedisConditions; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static io.lettuce.TestTags.INTEGRATION_TEST; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration tests for Redis Search functionality in a cluster environment. + *
+ * These tests verify that FT.SEARCH and FT.CURSOR commands work correctly across multiple cluster nodes, ensuring that search + * operations can find data distributed across different shards. + *
+ * + * @author Tihomir Mateev + * @since 6.8 + */ +@Tag(INTEGRATION_TEST) +public class RediSearchClusterIntegrationTests { + + // Index names + private static final String PRODUCTS_INDEX = "products-cluster-idx"; + + private static final String BOOKS_INDEX = "books-cluster-idx"; + + // Prefixes + private static final String PRODUCT_PREFIX = "product:cluster:"; + + private static final String BOOK_PREFIX = "book:cluster:"; + + protected static RedisClusterClient client; + + protected static RedisAdvancedClusterCommands redis; + + public RediSearchClusterIntegrationTests() { + RedisURI redisURI = RedisURI.Builder.redis("127.0.0.1").withPort(36379).build(); + client = RedisClusterClient.create(redisURI); + redis = client.connect().sync(); + } + + @BeforeEach + public void prepare() { + // 7.4 and 7.2 have a different behavior, but we do not want to test for old versions + assumeTrue(RedisConditions.of(redis).hasVersionGreaterOrEqualsTo("8.0")); + + redis.flushall(); + } + + @AfterAll + static void teardown() { + if (client != null) { + client.shutdown(); + } + } + + /** + * Test FT.SEARCH command in cluster environment with data distributed across multiple shards. This test creates an index, + * inserts data with keys that hash to different slots, and verifies that search works across all cluster nodes. + */ + @Test + void testFtSearchAcrossMultipleShards() { + // Create field definitions + FieldArgs nameField = TextFieldArgs. builder().name("name").build(); + FieldArgs categoryField = TagFieldArgs. builder().name("category").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().withPrefix(PRODUCT_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + // Create index on all cluster nodes + assertThat(redis.ftCreate(PRODUCTS_INDEX, createArgs, Arrays.asList(nameField, categoryField, priceField))) + .isEqualTo("OK"); + + // Create test data with keys that hash to different slots + String[] productKeys = { "product:cluster:laptop1", // Different hash slots + "product:cluster:mouse2", "product:cluster:keyboard3", "product:cluster:monitor4", "product:cluster:tablet5", + "product:cluster:phone6" }; + + // Verify keys are distributed across different slots + Map keySlots = new HashMap<>(); + for (String key : productKeys) { + int slot = SlotHash.getSlot(key); + keySlots.put(key, slot); + } + + // Ensure we have keys in at least 2 different slots + long uniqueSlots = keySlots.values().stream().distinct().count(); + assertThat(uniqueSlots).isGreaterThanOrEqualTo(2); + + // Insert test data + Map laptop = new HashMap<>(); + laptop.put("name", "Gaming Laptop"); + laptop.put("category", "electronics"); + laptop.put("price", "1299.99"); + redis.hmset(productKeys[0], laptop); + + Map mouse = new HashMap<>(); + mouse.put("name", "Wireless Mouse"); + mouse.put("category", "electronics"); + mouse.put("price", "29.99"); + redis.hmset(productKeys[1], mouse); + + Map keyboard = new HashMap<>(); + keyboard.put("name", "Mechanical Keyboard"); + keyboard.put("category", "electronics"); + keyboard.put("price", "149.99"); + redis.hmset(productKeys[2], keyboard); + + Map monitor = new HashMap<>(); + monitor.put("name", "4K Monitor"); + monitor.put("category", "electronics"); + monitor.put("price", "399.99"); + redis.hmset(productKeys[3], monitor); + + Map tablet = new HashMap<>(); + tablet.put("name", "Android Tablet"); + tablet.put("category", "mobile"); + tablet.put("price", "299.99"); + redis.hmset(productKeys[4], tablet); 
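+ // Cluster slot assignment is CRC16(key) mod 16384, so these distinct key names are expected + // to spread across several slots; the uniqueSlots assertion above guards that assumption.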
+ + Map phone = new HashMap<>(); + phone.put("name", "Smartphone"); + phone.put("category", "mobile"); + phone.put("price", "699.99"); + redis.hmset(productKeys[5], phone); + + // Test 1: Search for all electronics across cluster + SearchReply searchResults = redis.ftSearch(PRODUCTS_INDEX, "@category:{electronics}"); + + // Verify we get results - should find laptop, mouse, keyboard, monitor + assertThat(searchResults.getCount()).isEqualTo(4); + assertThat(searchResults.getResults()).hasSize(4); + + // Test 2: Search with price range across cluster + SearchArgs priceSearchArgs = SearchArgs. builder().build(); + SearchReply priceResults = redis.ftSearch(PRODUCTS_INDEX, "@price:[100 500]", priceSearchArgs); + + // Should find keyboard, monitor, tablet (prices 149.99, 399.99, 299.99) + assertThat(priceResults.getCount()).isEqualTo(3); + + // Test 3: Text search across cluster + SearchReply textResults = redis.ftSearch(PRODUCTS_INDEX, "@name:Gaming"); + + // Should find only the Gaming Laptop + assertThat(textResults.getCount()).isEqualTo(1); + assertThat(textResults.getResults().get(0).getFields().get("name")).isEqualTo("Gaming Laptop"); + + // Cleanup + redis.ftDropindex(PRODUCTS_INDEX); + } + + /** + * Test FT.CURSOR functionality in cluster environment. This test creates an aggregation with cursor and verifies cursor + * operations work across cluster nodes. + */ + @Test + void testFtCursorAcrossMultipleShards() { + // Create field definitions for books + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs authorField = TagFieldArgs. builder().name("author").build(); + FieldArgs yearField = NumericFieldArgs. builder().name("year").sortable().build(); + FieldArgs ratingField = NumericFieldArgs. builder().name("rating").sortable().build(); + + CreateArgs createArgs = CreateArgs. builder().withPrefix(BOOK_PREFIX) + .on(CreateArgs.TargetType.HASH).build(); + + // Create index on cluster + String createResult = redis.ftCreate(BOOKS_INDEX, createArgs, + Arrays.asList(titleField, authorField, yearField, ratingField)); + + // Verify index creation + assertThat(createResult).isEqualTo("OK"); + + // Create test data with keys that hash to different slots + String[] bookKeys = { "book:cluster:scifi1", "book:cluster:fantasy2", "book:cluster:mystery3", "book:cluster:romance4", + "book:cluster:thriller5", "book:cluster:biography6", "book:cluster:history7", "book:cluster:science8" }; + + // Insert books data + String[][] booksData = { { "Dune", "frank_herbert", "1965", "4.2" }, { "Lord of the Rings", "tolkien", "1954", "4.5" }, + { "Sherlock Holmes", "doyle", "1887", "4.1" }, { "Pride and Prejudice", "austen", "1813", "4.0" }, + { "Gone Girl", "flynn", "2012", "3.9" }, { "Steve Jobs", "isaacson", "2011", "4.3" }, + { "Sapiens", "harari", "2011", "4.4" }, { "Cosmos", "sagan", "1980", "4.6" } }; + + for (int i = 0; i < bookKeys.length; i++) { + Map book = new HashMap<>(); + book.put("title", booksData[i][0]); + book.put("author", booksData[i][1]); + book.put("year", booksData[i][2]); + book.put("rating", booksData[i][3]); + redis.hmset(bookKeys[i], book); + } + + // Test aggregation with cursor - group by author and get average rating + AggregateArgs aggregateArgs = AggregateArgs. builder() + .groupBy(AggregateArgs.GroupBy. of("author") + .reduce(AggregateArgs.Reducer. 
avg("@rating").as("avg_rating"))) + .withCursor(AggregateArgs.WithCursor.of(2L)) // Small batch size to test cursor functionality + .build(); + + // Execute aggregation with cursor + AggregationReply aggregateResults = redis.ftAggregate(BOOKS_INDEX, "*", aggregateArgs); + + // Verify we get results with cursor + assertThat(aggregateResults).isNotNull(); + assertThat(aggregateResults.getAggregationGroups()).isGreaterThan(0); + + // Test cursor read functionality if cursor is available + if (aggregateResults.getCursorId() != -1 && aggregateResults.getCursorId() > 0) { + // Read next batch using cursor + AggregationReply cursorResults = redis.ftCursorread(BOOKS_INDEX, aggregateResults.getCursorId()); + + // Verify cursor read works + assertThat(cursorResults).isNotNull(); + + // The cursor results should be valid (either have data or indicate completion) + // Cursor ID of 0 indicates end of results + assertThat(cursorResults.getCursorId()).isGreaterThanOrEqualTo(0); + } + + // Cleanup + redis.ftDropindex(BOOKS_INDEX); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java index 50c6a6849e..1f870bb651 100644 --- a/src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java +++ b/src/test/java/io/lettuce/core/search/RediSearchGeospatialIntegrationTests.java @@ -90,7 +90,7 @@ void testGeoFieldBasicFunctionality() { FieldArgs nameField = TextFieldArgs. builder().name("name").build(); FieldArgs cityField = TextFieldArgs. builder().name("city").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("store:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("store:") .on(CreateArgs.TargetType.HASH).build(); String result = redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, nameField, cityField)); @@ -148,7 +148,7 @@ void testGeoFieldMultipleLocations() { FieldArgs locationField = GeoFieldArgs. builder().name("locations").build(); FieldArgs productField = TextFieldArgs. builder().name("product").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("product:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("product:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, productField)); @@ -185,7 +185,7 @@ void testGeoshapePointSphericalCoordinates() { FieldArgs geomField = GeoshapeFieldArgs. builder().name("geom").spherical().build(); FieldArgs nameField = TextFieldArgs. builder().name("name").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("location:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("location:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(GEOSHAPE_INDEX, createArgs, Arrays.asList(geomField, nameField)); @@ -232,7 +232,7 @@ void testGeoshapePolygonSpatialRelationships() { FieldArgs geomField = GeoshapeFieldArgs. builder().name("geom").flat().build(); FieldArgs nameField = TextFieldArgs. builder().name("name").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("shape:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("shape:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(CARTESIAN_INDEX, createArgs, Arrays.asList(geomField, nameField)); @@ -319,7 +319,7 @@ void testComplexGeospatialQueries() { FieldArgs categoryField = TextFieldArgs. builder().name("category").build(); FieldArgs ratingField = TextFieldArgs. 
builder().name("rating").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("business:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("business:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(GEO_INDEX, createArgs, @@ -380,7 +380,7 @@ void testGeospatialUnitsAndCoordinateSystems() { FieldArgs locationField = GeoFieldArgs. builder().name("location").build(); FieldArgs nameField = TextFieldArgs. builder().name("name").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("poi:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("poi:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, nameField)); @@ -428,7 +428,7 @@ void testGeospatialErrorHandling() { FieldArgs geomField = GeoshapeFieldArgs. builder().name("geom").build(); FieldArgs nameField = TextFieldArgs. builder().name("name").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("test:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("test:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(GEO_INDEX, createArgs, Arrays.asList(locationField, geomField, nameField)); diff --git a/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java index 46804ec850..e6272e8739 100644 --- a/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java +++ b/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java @@ -26,24 +26,17 @@ import io.lettuce.core.search.arguments.SynUpdateArgs; import io.lettuce.core.search.arguments.TagFieldArgs; import io.lettuce.core.search.arguments.TextFieldArgs; -import io.lettuce.core.search.arguments.VectorFieldArgs; -import io.lettuce.core.search.arguments.GeoFieldArgs; -import io.lettuce.core.search.arguments.GeoshapeFieldArgs; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static io.lettuce.TestTags.INTEGRATION_TEST; import static org.assertj.core.api.Assertions.assertThat; @@ -121,7 +114,7 @@ void testBasicTextSearchWithBlogPosts() { FieldArgs createdDateField = NumericFieldArgs. builder().name("created_date").sortable().build(); FieldArgs viewsField = NumericFieldArgs. builder().name("views").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(BLOG_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(BLOG_PREFIX) .on(CreateArgs.TargetType.HASH).build(); String result = redis.ftCreate(BLOG_INDEX, createArgs, @@ -188,7 +181,7 @@ void testSearchOptionsAndModifiers() { FieldArgs titleField = TextFieldArgs. builder().name("title").sortable().build(); FieldArgs ratingField = NumericFieldArgs. builder().name("rating").sortable().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(MOVIE_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(MOVIE_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(MOVIES_INDEX, createArgs, Arrays.asList(titleField, ratingField)); @@ -274,7 +267,7 @@ void testTagFieldsWithCustomSeparator() { FieldArgs titleField = TextFieldArgs. 
builder().name("title").build(); FieldArgs categoriesField = TagFieldArgs. builder().name("categories").separator(";").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(BOOK_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(BOOK_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(BOOKS_INDEX, createArgs, Arrays.asList(titleField, categoriesField)); @@ -326,7 +319,7 @@ void testNumericFieldOperations() { FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); FieldArgs stockField = NumericFieldArgs. builder().name("stock").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(PRODUCT_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(PRODUCT_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(PRODUCTS_INDEX, createArgs, Arrays.asList(nameField, priceField, stockField)); @@ -395,7 +388,7 @@ void testAdvancedSearchFeatures() { FieldArgs contentField = TextFieldArgs. builder().name("content").build(); FieldArgs categoryField = TagFieldArgs. builder().name("category").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(BLOG_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(BLOG_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(BLOG_INDEX, createArgs, Arrays.asList(titleField, contentField, categoryField)); @@ -456,7 +449,7 @@ void testComplexQueriesAndBooleanOperations() { FieldArgs tagsField = TagFieldArgs. builder().name("tags").build(); FieldArgs ratingField = NumericFieldArgs. builder().name("rating").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(MOVIE_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(MOVIE_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(MOVIES_INDEX, createArgs, Arrays.asList(titleField, descriptionField, tagsField, ratingField)); @@ -533,7 +526,7 @@ void testEmptyResultsAndEdgeCases() { // Create a simple index FieldArgs titleField = TextFieldArgs. builder().name("title").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(BLOG_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(BLOG_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(BLOG_INDEX, createArgs, Collections.singletonList(titleField)); @@ -889,7 +882,7 @@ void testFtSpellcheckCommand() { FieldArgs contentField = TextFieldArgs. builder().name("content").build(); // Create an index with some documents - CreateArgs createArgs = CreateArgs. builder().addPrefix("doc:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("doc:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate(testIndex, createArgs, Arrays.asList(titleField, contentField))).isEqualTo("OK"); @@ -975,7 +968,7 @@ void testFtExplainCommand() { FieldArgs contentField = TextFieldArgs. builder().name("content").build(); // Create an index - CreateArgs createArgs = CreateArgs. builder().addPrefix("doc:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("doc:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate(testIndex, createArgs, Arrays.asList(titleField, contentField))).isEqualTo("OK"); @@ -1018,12 +1011,12 @@ void testFtListCommand() { FieldArgs titleField = TextFieldArgs. builder().name("title").build(); // Create first index - CreateArgs createArgs1 = CreateArgs. builder().addPrefix("doc1:") + CreateArgs createArgs1 = CreateArgs. 
builder().withPrefix("doc1:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate(testIndex1, createArgs1, Collections.singletonList(titleField))).isEqualTo("OK"); // Create second index - CreateArgs createArgs2 = CreateArgs. builder().addPrefix("doc2:") + CreateArgs createArgs2 = CreateArgs. builder().withPrefix("doc2:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate(testIndex2, createArgs2, Collections.singletonList(titleField))).isEqualTo("OK"); @@ -1056,7 +1049,7 @@ void testFtSynonymCommands() { FieldArgs contentField = TextFieldArgs. builder().name("content").build(); // Create an index - CreateArgs createArgs = CreateArgs. builder().addPrefix("doc:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("doc:") .on(CreateArgs.TargetType.HASH).build(); assertThat(redis.ftCreate(testIndex, createArgs, Arrays.asList(titleField, contentField))).isEqualTo("OK"); @@ -1110,33 +1103,4 @@ void testFtSynonymCommands() { assertThat(redis.ftDropindex(testIndex)).isEqualTo("OK"); } - /** - * Helper method to encode a float vector as a byte array for Redis vector search. Redis expects vector data as binary - * representation of float values. - */ - private String encodeFloatVector(float[] vector) { - ByteBuffer buffer = ByteBuffer.allocate(vector.length * 4).order(ByteOrder.LITTLE_ENDIAN); - for (float value : vector) { - buffer.putFloat(value); - } - return new String(buffer.array(), StandardCharsets.ISO_8859_1); - } - - /** - * Helper method to create a bicycle document with all required fields. - */ - private void createBicycleDocument(String key, String pickupZone, String storeLocation, String brand, String model, - int price, String description, String condition) { - Map document = new HashMap<>(); - document.put("pickup_zone", pickupZone); - document.put("store_location", storeLocation); - document.put("brand", brand); - document.put("model", model); - document.put("price", String.valueOf(price)); - document.put("description", description); - document.put("condition", condition); - - assertThat(redis.hmset(key, document)).isEqualTo("OK"); - } - } diff --git a/src/test/java/io/lettuce/core/search/RediSearchVectorIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchVectorIntegrationTests.java index d48d893126..93da02e299 100644 --- a/src/test/java/io/lettuce/core/search/RediSearchVectorIntegrationTests.java +++ b/src/test/java/io/lettuce/core/search/RediSearchVectorIntegrationTests.java @@ -13,6 +13,7 @@ import io.lettuce.core.RedisCommandExecutionException; import io.lettuce.core.RedisURI; import io.lettuce.core.api.sync.RedisCommands; +import io.lettuce.core.codec.RedisCodec; import io.lettuce.core.search.arguments.CreateArgs; import io.lettuce.core.search.arguments.FieldArgs; import io.lettuce.core.search.arguments.NumericFieldArgs; @@ -144,7 +145,7 @@ void testFlatVectorIndexWithKnnSearch() { FieldArgs titleField = TextFieldArgs. builder().name("title").build(); FieldArgs categoryField = TagFieldArgs. builder().name("category").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(DOCS_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(DOCS_PREFIX) .on(CreateArgs.TargetType.HASH).build(); String result = redis.ftCreate(DOCUMENTS_INDEX, createArgs, Arrays.asList(vectorField, titleField, categoryField)); @@ -225,7 +226,7 @@ void testHnswVectorIndexWithFiltering() { FieldArgs yearField = NumericFieldArgs. builder().name("year").sortable().build(); FieldArgs ratingField = NumericFieldArgs. 
builder().name("rating").sortable().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(MOVIE_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(MOVIE_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(MOVIES_INDEX, createArgs, Arrays.asList(vectorField, titleField, genreField, yearField, ratingField)); @@ -334,7 +335,7 @@ void testVectorRangeQueries() { FieldArgs typeField = TagFieldArgs. builder().name("type").build(); FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(PRODUCT_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(PRODUCT_PREFIX) .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(PRODUCTS_INDEX, createArgs, Arrays.asList(vectorField, nameField, typeField, priceField)); @@ -437,7 +438,7 @@ void testDistanceMetricsAndVectorTypes() { FieldArgs nameField = TextFieldArgs. builder().name("name").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("test:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("test:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate(indexName, createArgs, Arrays.asList(vectorField, nameField)); @@ -491,7 +492,7 @@ void testJsonVectorStorage() { FieldArgs titleField = TextFieldArgs. builder().name("$.title").as("title").build(); FieldArgs categoryField = TagFieldArgs. builder().name("$.category").as("category").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("json:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("json:") .on(CreateArgs.TargetType.JSON).build(); redis.ftCreate("json-vector-idx", createArgs, Arrays.asList(vectorField, titleField, categoryField)); @@ -558,7 +559,7 @@ void testAdvancedVectorSearchFeatures() { FieldArgs statusField = TagFieldArgs. builder().name("status").build(); FieldArgs priorityField = NumericFieldArgs. builder().name("priority").sortable().build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("task:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("task:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("tasks-idx", createArgs, Arrays.asList(vectorField, titleField, statusField, priorityField)); @@ -663,7 +664,7 @@ void testVectorTypesAndPrecision() { FieldArgs nameField = TextFieldArgs. builder().name("name").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("precision:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("precision:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("precision-idx", createArgs, Arrays.asList(float64Field, nameField)); @@ -738,7 +739,7 @@ void testVectorSearchErrorHandling() { .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(3).distanceMetric(VectorFieldArgs.DistanceMetric.COSINE) .build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix("error:") + CreateArgs createArgs = CreateArgs. builder().withPrefix("error:") .on(CreateArgs.TargetType.HASH).build(); redis.ftCreate("error-test-idx", createArgs, Collections.singletonList(vectorField)); @@ -778,4 +779,137 @@ void testVectorSearchErrorHandling() { redis.ftDropindex("error-test-idx"); } + /** + * Test vector search with mixed binary and text fields, following the Python example. This test demonstrates handling both + * binary vector data and text data in the same hash, with proper decoding of each field type. 
+ */ + @Test + void testVectorSearchBinaryAndTextFields() { + // Create a custom codec that can handle both strings and byte arrays + RedisCodec mixedCodec = new RedisCodec() { + + @Override + public String decodeKey(ByteBuffer bytes) { + return StandardCharsets.UTF_8.decode(bytes).toString(); + } + + @Override + public Object decodeValue(ByteBuffer bytes) { + // Try to decode as UTF-8 string first + try { + String str = StandardCharsets.UTF_8.decode(bytes.duplicate()).toString(); + // Check if it's a valid UTF-8 string (no replacement characters) + if (!str.contains("\uFFFD")) { + return str; + } + } catch (Exception e) { + // Fall through to return raw bytes + } + // Return raw bytes for binary data + byte[] result = new byte[bytes.remaining()]; + bytes.get(result); + return result; + } + + @Override + public ByteBuffer encodeKey(String key) { + return ByteBuffer.wrap(key.getBytes(StandardCharsets.UTF_8)); + } + + @Override + public ByteBuffer encodeValue(Object value) { + if (value instanceof String) { + return ByteBuffer.wrap(((String) value).getBytes(StandardCharsets.UTF_8)); + } else if (value instanceof byte[]) { + return ByteBuffer.wrap((byte[]) value); + } else if (value instanceof float[]) { + float[] floats = (float[]) value; + ByteBuffer buffer = ByteBuffer.allocate(floats.length * 4).order(ByteOrder.LITTLE_ENDIAN); + for (float f : floats) { + buffer.putFloat(f); + } + return (ByteBuffer) buffer.flip(); + } else { + return ByteBuffer.wrap(value.toString().getBytes(StandardCharsets.UTF_8)); + } + } + + }; + + // Create connection with mixed codec + RedisCommands redisMixed = client.connect(mixedCodec).sync(); + + try { + // Create fake vector similar to Python example + float[] fakeVec = { 0.1f, 0.2f, 0.3f, 0.4f }; + byte[] fakeVecBytes = floatArrayToByteBuffer(fakeVec).array(); + + String indexName = "mixed_index"; + String keyName = indexName + ":1"; + + // Store mixed data: text field and binary vector field + redisMixed.hset(keyName, "first_name", "🥬 Lettuce"); + redisMixed.hset(keyName, "vector_emb", fakeVecBytes); + + // Create index with both text and vector fields + FieldArgs textField = TagFieldArgs. builder().name("first_name").build(); + + FieldArgs vectorField = VectorFieldArgs. builder().name("embeddings_bio").hnsw() + .type(VectorFieldArgs.VectorType.FLOAT32).dimensions(4) + .distanceMetric(VectorFieldArgs.DistanceMetric.COSINE).build(); + + CreateArgs createArgs = CreateArgs. builder().withPrefix(indexName + ":") + .on(CreateArgs.TargetType.HASH).build(); + + redis.ftCreate(indexName, createArgs, Arrays.asList(textField, vectorField)); + + // Search with specific field returns - equivalent to Python's return_field with decode_field=False + SearchArgs searchArgs = SearchArgs. 
builder().returnField("vector_emb") // This + // should + // return + // raw + // binary + // data + .returnField("first_name") // This should return decoded text + .build(); + + SearchReply results = redisMixed.ftSearch(indexName, "*", searchArgs); + + assertThat(results.getCount()).isEqualTo(1); + assertThat(results.getResults()).hasSize(1); + + SearchReply.SearchResult result = results.getResults().get(0); + Map fields = result.getFields(); + + // Verify text field is properly decoded + Object firstNameValue = fields.get("first_name"); + assertThat(firstNameValue).isInstanceOf(String.class); + assertThat((String) firstNameValue).isEqualTo("🥬 Lettuce"); + + // Verify vector field returns binary data + Object vectorValue = fields.get("vector_emb"); + assertThat(vectorValue).isInstanceOf(byte[].class); + + // Convert retrieved binary data back to float array and compare + byte[] retrievedVecBytes = (byte[]) vectorValue; + ByteBuffer buffer = ByteBuffer.wrap(retrievedVecBytes).order(ByteOrder.LITTLE_ENDIAN); + float[] retrievedVec = new float[4]; + for (int i = 0; i < 4; i++) { + retrievedVec[i] = buffer.getFloat(); + } + + // Assert that the vectors are equal (equivalent to Python's np.array_equal) + assertThat(retrievedVec).containsExactly(fakeVec); + + // Cleanup + redis.ftDropindex(indexName); + + } finally { + // Close the mixed codec connection + if (redisMixed != null) { + redisMixed.getStatefulConnection().close(); + } + } + } + } diff --git a/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java b/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java index 6a083b656e..d79417a471 100644 --- a/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java +++ b/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java @@ -96,7 +96,7 @@ void testBasicJsonIndexingAndSearch() { FieldArgs descriptionField = TextFieldArgs. builder().name("$.description").as("description").build(); FieldArgs priceField = NumericFieldArgs. builder().name("$.price").as("price").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(ITEM_PREFIX) .on(CreateArgs.TargetType.JSON).build(); String result = redis.ftCreate(ITEM_INDEX, createArgs, Arrays.asList(nameField, descriptionField, priceField)); @@ -149,7 +149,7 @@ void testJsonArraysAsTagFields() { FieldArgs nameField = TextFieldArgs. builder().name("$.name").as("name").build(); FieldArgs descriptionField = TextFieldArgs. builder().name("$.description").as("description").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + CreateArgs createArgs = CreateArgs. builder().withPrefix(ITEM_PREFIX) .on(CreateArgs.TargetType.JSON).build(); redis.ftCreate(ITEM_INDEX_2, createArgs, Arrays.asList(colorsField, nameField, descriptionField)); @@ -201,7 +201,7 @@ void testJsonArraysAsTextFields() { FieldArgs nameField = TextFieldArgs. builder().name("$.name").as("name").build(); FieldArgs descriptionField = TextFieldArgs. builder().name("$.description").as("description").build(); - CreateArgs createArgs = CreateArgs. builder().addPrefix(ITEM_PREFIX) + CreateArgs createArgs = CreateArgs. 
+            SearchReply.SearchResult<String, Object> result = results.getResults().get(0);
+            Map<String, Object> fields = result.getFields();
+
+            // Verify the text field is properly decoded
+            Object firstNameValue = fields.get("first_name");
+            assertThat(firstNameValue).isInstanceOf(String.class);
+            assertThat((String) firstNameValue).isEqualTo("🥬 Lettuce");
+
+            // Verify the vector field returns binary data
+            Object vectorValue = fields.get("vector_emb");
+            assertThat(vectorValue).isInstanceOf(byte[].class);
+
+            // Convert the retrieved binary data back to a float array and compare
+            byte[] retrievedVecBytes = (byte[]) vectorValue;
+            ByteBuffer buffer = ByteBuffer.wrap(retrievedVecBytes).order(ByteOrder.LITTLE_ENDIAN);
+            float[] retrievedVec = new float[4];
+            for (int i = 0; i < 4; i++) {
+                retrievedVec[i] = buffer.getFloat();
+            }
+
+            // Assert that the vectors are equal (equivalent to Python's np.array_equal)
+            assertThat(retrievedVec).containsExactly(fakeVec);
+
+            // Cleanup
+            redis.ftDropindex(indexName);
+
+        } finally {
+            // Close the mixed codec connection
+            if (redisMixed != null) {
+                redisMixed.getStatefulConnection().close();
+            }
+        }
+    }
+
+}
diff --git a/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java b/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java
index 6a083b656e..d79417a471 100644
--- a/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java
+++ b/src/test/java/io/lettuce/core/search/RedisJsonIndexingIntegrationTests.java
@@ -96,7 +96,7 @@ void testBasicJsonIndexingAndSearch() {
         FieldArgs<String> descriptionField = TextFieldArgs.<String> builder().name("$.description").as("description").build();
         FieldArgs<String> priceField = NumericFieldArgs.<String> builder().name("$.price").as("price").build();
 
-        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().addPrefix(ITEM_PREFIX)
+        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix(ITEM_PREFIX)
                 .on(CreateArgs.TargetType.JSON).build();
 
         String result = redis.ftCreate(ITEM_INDEX, createArgs, Arrays.asList(nameField, descriptionField, priceField));
@@ -149,7 +149,7 @@ void testJsonArraysAsTagFields() {
         FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("$.name").as("name").build();
         FieldArgs<String> descriptionField = TextFieldArgs.<String> builder().name("$.description").as("description").build();
 
-        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().addPrefix(ITEM_PREFIX)
+        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix(ITEM_PREFIX)
                 .on(CreateArgs.TargetType.JSON).build();
 
         redis.ftCreate(ITEM_INDEX_2, createArgs, Arrays.asList(colorsField, nameField, descriptionField));
@@ -201,7 +201,7 @@ void testJsonArraysAsTextFields() {
         FieldArgs<String> nameField = TextFieldArgs.<String> builder().name("$.name").as("name").build();
         FieldArgs<String> descriptionField = TextFieldArgs.<String> builder().name("$.description").as("description").build();
 
-        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().addPrefix(ITEM_PREFIX)
+        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix(ITEM_PREFIX)
                 .on(CreateArgs.TargetType.JSON).build();
 
         redis.ftCreate(ITEM_INDEX_3, createArgs, Arrays.asList(colorsField, nameField, descriptionField));
@@ -237,7 +237,7 @@ void testJsonArraysAsNumericFields() {
         // FT.CREATE itemIdx4 ON JSON PREFIX 1 item: SCHEMA $.max_level AS dB NUMERIC
         FieldArgs<String> dbField = NumericFieldArgs.<String> builder().name("$.max_level").as("dB").build();
 
-        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().addPrefix(ITEM_PREFIX)
+        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix(ITEM_PREFIX)
                 .on(CreateArgs.TargetType.JSON).build();
 
         redis.ftCreate(ITEM_INDEX_4, createArgs, Collections.singletonList(dbField));
@@ -279,7 +279,7 @@ void testFieldProjectionWithJsonPath() {
         FieldArgs<String> descriptionField = TextFieldArgs.<String> builder().name("$.description").as("description").build();
         FieldArgs<String> priceField = NumericFieldArgs.<String> builder().name("$.price").as("price").build();
 
-        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().addPrefix(ITEM_PREFIX)
+        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix(ITEM_PREFIX)
                 .on(CreateArgs.TargetType.JSON).build();
 
         redis.ftCreate(ITEM_INDEX, createArgs, Arrays.asList(nameField, descriptionField, priceField));
@@ -334,7 +334,7 @@ void testJsonObjectIndexing() {
         FieldArgs<String> connectionTypeField = TextFieldArgs.<String> builder().name("$.connection.type").as("connectionType")
                 .build();
 
-        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().addPrefix(ITEM_PREFIX)
+        CreateArgs<String, String> createArgs = CreateArgs.<String, String> builder().withPrefix(ITEM_PREFIX)
                 .on(CreateArgs.TargetType.JSON).build();
 
         redis.ftCreate(ITEM_INDEX, createArgs, Arrays.asList(wirelessField, connectionTypeField));
diff --git a/src/test/java/io/lettuce/core/search/arguments/CreateArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/CreateArgsTest.java
index 84523c6fd4..5adfe8f31e 100644
--- a/src/test/java/io/lettuce/core/search/arguments/CreateArgsTest.java
+++ b/src/test/java/io/lettuce/core/search/arguments/CreateArgsTest.java
@@ -60,8 +60,8 @@ void testCreateArgsWithTargetType() {
 
     @Test
     void testCreateArgsWithPrefixes() {
-        CreateArgs<String, String> args = CreateArgs.<String, String> builder().addPrefix("blog:").addPrefix("post:")
-                .addPrefix("article:").build();
+        CreateArgs<String, String> args = CreateArgs.<String, String> builder().withPrefix("blog:").withPrefix("post:")
+                .withPrefix("article:").build();
 
         assertThat(args.getPrefixes()).containsExactly("blog:", "post:", "article:");
     }
@@ -99,8 +99,8 @@ void testCreateArgsWithPayloadField() {
 
     @Test
     void testCreateArgsWithFlags() {
-        CreateArgs<String, String> args = CreateArgs.<String, String> builder().maxTextFields(true).noOffsets(true)
-                .noHighlighting(true).noFields(true).noFrequency(true).skipInitialScan(true).build();
+        CreateArgs<String, String> args = CreateArgs.<String, String> builder().maxTextFields().noOffsets().noHighlighting()
+                .noFields().noFrequency().skipInitialScan().build();
 
         assertThat(args.isMaxTextFields()).isTrue();
         assertThat(args.isNoOffsets()).isTrue();
@@ -135,9 +135,9 @@ void testCreateArgsWithEmptyStopWords() {
     @Test
     void testCreateArgsBuild() {
         CreateArgs<String, String> args = CreateArgs.<String, String> builder().on(CreateArgs.TargetType.JSON)
-                .addPrefix("blog:").addPrefix("post:").filter("@status:published").defaultLanguage(DocumentLanguage.FRENCH)
-                .languageField("lang").defaultScore(0.8).scoreField("score").payloadField("payload").maxTextFields(true)
-                .temporary(7200).noOffsets(true).noHighlighting(true).noFields(true).noFrequency(true).skipInitialScan(true)
+                .withPrefix("blog:").withPrefix("post:").filter("@status:published").defaultLanguage(DocumentLanguage.FRENCH)
+                .languageField("lang").defaultScore(0.8).scoreField("score").payloadField("payload").maxTextFields()
+                .temporary(7200).noOffsets().noHighlighting().noFields().noFrequency().skipInitialScan()
                 .stopWords(Arrays.asList("le", "la", "et")).build();
 
         CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
@@ -173,7 +173,7 @@ void testCreateArgsBuild() {
 
     @Test
     void testCreateArgsMinimalBuild() {
-        CreateArgs<String, String> args = CreateArgs.<String, String> builder().addPrefix("test:").build();
+        CreateArgs<String, String> args = CreateArgs.<String, String> builder().withPrefix("test:").build();
 
         CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
         args.build(commandArgs);
diff --git a/src/test/java/io/lettuce/core/search/arguments/SearchArgsTest.java b/src/test/java/io/lettuce/core/search/arguments/SearchArgsTest.java
index 60ffcf7730..977914876e 100644
--- a/src/test/java/io/lettuce/core/search/arguments/SearchArgsTest.java
+++ b/src/test/java/io/lettuce/core/search/arguments/SearchArgsTest.java
@@ -29,18 +29,16 @@ void testDefaultSearchArgs() {
 
         assertThat(args.isNoContent()).isFalse();
         assertThat(args.isWithScores()).isFalse();
-        assertThat(args.isWithPayloads()).isFalse();
         assertThat(args.isWithSortKeys()).isFalse();
     }
 
     @Test
     void testSearchArgsWithOptions() {
-        SearchArgs<String, String> args = SearchArgs.<String, String> builder().noContent().withScores().withPayloads()
-                .withSortKeys().verbatim().noStopWords().build();
+        SearchArgs<String, String> args = SearchArgs.<String, String> builder().noContent().withScores().withSortKeys()
+                .verbatim().build();
 
         assertThat(args.isNoContent()).isTrue();
         assertThat(args.isWithScores()).isTrue();
-        assertThat(args.isWithPayloads()).isTrue();
         assertThat(args.isWithSortKeys()).isTrue();
     }
 
@@ -78,7 +76,7 @@ void testSearchArgsWithLimitAndTimeout() {
     @Test
     void testSearchArgsWithLanguageAndScoring() {
         SearchArgs<String, String> args = SearchArgs.<String, String> builder().language(DocumentLanguage.ENGLISH)
-                .scorer(ScoringFunction.TF_IDF).payload("test-payload").build();
+                .scorer(ScoringFunction.TF_IDF).build();
 
         CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
         args.build(commandArgs);
@@ -86,7 +84,6 @@ void testSearchArgsWithLanguageAndScoring() {
 
         String argsString = commandArgs.toString();
         assertThat(argsString).contains("LANGUAGE");
         assertThat(argsString).contains("SCORER");
-        assertThat(argsString).contains("PAYLOAD");
     }
 
     @Test
@@ -121,11 +118,8 @@ void testSearchArgsWithHighlightAndSummarize() {
         HighlightArgs<String, String> highlight = HighlightArgs.<String, String> builder().field("title").tags("<b>", "</b>")
                 .build();
 
-        SummarizeArgs<String, String> summarize = SummarizeArgs.<String, String> builder().field("content").fragments(3)
-                .len(100).separator("...").build();
-
-        SearchArgs<String, String> args = SearchArgs.<String, String> builder().highlight(highlight).summarize(summarize)
-                .build();
+        SearchArgs<String, String> args = SearchArgs.<String, String> builder().highlightArgs(highlight)
+                .summarizeField("content").summarizeFragments(3).summarizeLen(100).summarizeSeparator("...").build();
 
         CommandArgs<String, String> commandArgs = new CommandArgs<>(StringCodec.UTF8);
         args.build(commandArgs);