Skip to content

Commit 85b8294

Browse files
Merge pull request #916 from KarthikSubbarao/1.2-ga
Merge from main into 1.2
2 parents e9fdc78 + 88e95cf commit 85b8294

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

45 files changed

+782
-430
lines changed

.github/benchmark_configs/fts-benchmarks-arm.json

Lines changed: 57 additions & 149 deletions
Original file line numberDiff line numberDiff line change
@@ -477,6 +477,11 @@
477477
"type": "vector",
478478
"doc_count": 100,
479479
"dimensions": 256
480+
},
481+
"composed_tag_queries.csv": {
482+
"type": "tag_only",
483+
"doc_count": 100,
484+
"tags": ["electronics", "books", "clothing", "food", "sports"]
480485
}
481486
},
482487
"test_groups": [
@@ -1052,6 +1057,21 @@
10521057
"": "",
10531058
"NOCONTENT": "_nocontent"
10541059
}
1060+
},
1061+
{
1062+
"id": "e",
1063+
"type": "read",
1064+
"cluster_execution": "parallel",
1065+
"description": "NUMERIC + TAG composed (entries fetcher optimization test)",
1066+
"dataset": "datasets/composed_tag_queries.csv",
1067+
"clients": 1000,
1068+
"duration": 200,
1069+
"warmup": 60,
1070+
"command": "FT.SEARCH rd0 \"@price:[100 500] @category:{__field:category__}\"",
1071+
"options": {
1072+
"": "",
1073+
"NOCONTENT": "_nocontent"
1074+
}
10551075
}
10561076
]
10571077
},
@@ -1152,54 +1172,6 @@
11521172
}
11531173
}
11541174
},
1155-
{
1156-
"id": "b",
1157-
"type": "write",
1158-
"cluster_execution": "single",
1159-
"description": "1K docs × 100K tokens cyclic-8193 - position byte size increase",
1160-
"setup_commands": [
1161-
"FT.CREATE rd0 ON HASH PREFIX 1 rd0- STOPWORDS 0 SCHEMA field1 TEXT NOSTEM"
1162-
],
1163-
"flush_before": true,
1164-
"dataset": "datasets/posting_cyclic8193_1k_100k.csv",
1165-
"duration": 200,
1166-
"keyspacelen": 1000,
1167-
"clients": 100,
1168-
"sequential": true,
1169-
"command": "HSET rd0-{tag}:__rand_int__ field1 \"__field:field1__\"",
1170-
"profiling": {
1171-
"delays": {
1172-
"write": {
1173-
"delay": 5,
1174-
"duration": 10
1175-
}
1176-
}
1177-
}
1178-
},
1179-
{
1180-
"id": "c",
1181-
"type": "write",
1182-
"cluster_execution": "single",
1183-
"description": "10K docs × 50 fields × 1K tokens - field mask byte size increase",
1184-
"setup_commands": [
1185-
"FT.CREATE rd0 ON HASH PREFIX 1 rd0- SCHEMA field1 TEXT field2 TEXT field3 TEXT field4 TEXT field5 TEXT field6 TEXT field7 TEXT field8 TEXT field9 TEXT field10 TEXT field11 TEXT field12 TEXT field13 TEXT field14 TEXT field15 TEXT field16 TEXT field17 TEXT field18 TEXT field19 TEXT field20 TEXT field21 TEXT field22 TEXT field23 TEXT field24 TEXT field25 TEXT field26 TEXT field27 TEXT field28 TEXT field29 TEXT field30 TEXT field31 TEXT field32 TEXT field33 TEXT field34 TEXT field35 TEXT field36 TEXT field37 TEXT field38 TEXT field39 TEXT field40 TEXT field41 TEXT field42 TEXT field43 TEXT field44 TEXT field45 TEXT field46 TEXT field47 TEXT field48 TEXT field49 TEXT field50 TEXT"
1186-
],
1187-
"flush_before": true,
1188-
"dataset": "datasets/wiki_10k_50field_1ktok.csv",
1189-
"duration": 200,
1190-
"keyspacelen": 10000,
1191-
"clients": 1000,
1192-
"sequential": true,
1193-
"command": "HSET rd0-{tag}:__rand_int__ field1 \"__field:field1__\" field2 \"__field:field2__\" field3 \"__field:field3__\" field4 \"__field:field4__\" field5 \"__field:field5__\" field6 \"__field:field6__\" field7 \"__field:field7__\" field8 \"__field:field8__\" field9 \"__field:field9__\" field10 \"__field:field10__\" field11 \"__field:field11__\" field12 \"__field:field12__\" field13 \"__field:field13__\" field14 \"__field:field14__\" field15 \"__field:field15__\" field16 \"__field:field16__\" field17 \"__field:field17__\" field18 \"__field:field18__\" field19 \"__field:field19__\" field20 \"__field:field20__\" field21 \"__field:field21__\" field22 \"__field:field22__\" field23 \"__field:field23__\" field24 \"__field:field24__\" field25 \"__field:field25__\" field26 \"__field:field26__\" field27 \"__field:field27__\" field28 \"__field:field28__\" field29 \"__field:field29__\" field30 \"__field:field30__\" field31 \"__field:field31__\" field32 \"__field:field32__\" field33 \"__field:field33__\" field34 \"__field:field34__\" field35 \"__field:field35__\" field36 \"__field:field36__\" field37 \"__field:field37__\" field38 \"__field:field38__\" field39 \"__field:field39__\" field40 \"__field:field40__\" field41 \"__field:field41__\" field42 \"__field:field42__\" field43 \"__field:field43__\" field44 \"__field:field44__\" field45 \"__field:field45__\" field46 \"__field:field46__\" field47 \"__field:field47__\" field48 \"__field:field48__\" field49 \"__field:field49__\" field50 \"__field:field50__\"",
1194-
"profiling": {
1195-
"delays": {
1196-
"write": {
1197-
"delay": 5,
1198-
"duration": 10
1199-
}
1200-
}
1201-
}
1202-
},
12031175
{
12041176
"id": "d",
12051177
"type": "write",
@@ -1224,30 +1196,6 @@
12241196
}
12251197
}
12261198
}
1227-
},
1228-
{
1229-
"id": "e",
1230-
"type": "write",
1231-
"cluster_execution": "single",
1232-
"description": "100K docs × 20 fields × 10 tokens from set of 10 - small pos map (<128 bytes)",
1233-
"setup_commands": [
1234-
"FT.CREATE rd0 ON HASH PREFIX 1 rd0- STOPWORDS 0 SCHEMA field1 TEXT NOSTEM field2 TEXT NOSTEM field3 TEXT NOSTEM field4 TEXT NOSTEM field5 TEXT NOSTEM field6 TEXT NOSTEM field7 TEXT NOSTEM field8 TEXT NOSTEM field9 TEXT NOSTEM field10 TEXT NOSTEM field11 TEXT NOSTEM field12 TEXT NOSTEM field13 TEXT NOSTEM field14 TEXT NOSTEM field15 TEXT NOSTEM field16 TEXT NOSTEM field17 TEXT NOSTEM field18 TEXT NOSTEM field19 TEXT NOSTEM field20 TEXT NOSTEM"
1235-
],
1236-
"flush_before": true,
1237-
"dataset": "datasets/random_set_100k_20field_10tok.csv",
1238-
"duration": 200,
1239-
"keyspacelen": 100000,
1240-
"clients": 1000,
1241-
"sequential": true,
1242-
"command": "HSET rd0-{tag}:__rand_int__ field1 \"__field:field1__\" field2 \"__field:field2__\" field3 \"__field:field3__\" field4 \"__field:field4__\" field5 \"__field:field5__\" field6 \"__field:field6__\" field7 \"__field:field7__\" field8 \"__field:field8__\" field9 \"__field:field9__\" field10 \"__field:field10__\" field11 \"__field:field11__\" field12 \"__field:field12__\" field13 \"__field:field13__\" field14 \"__field:field14__\" field15 \"__field:field15__\" field16 \"__field:field16__\" field17 \"__field:field17__\" field18 \"__field:field18__\" field19 \"__field:field19__\" field20 \"__field:field20__\"",
1243-
"profiling": {
1244-
"delays": {
1245-
"write": {
1246-
"delay": 5,
1247-
"duration": 10
1248-
}
1249-
}
1250-
}
12511199
}
12521200
]
12531201
},
@@ -1290,7 +1238,7 @@
12901238
"dataset": "datasets/radix_uuid_10k_1ktok.csv",
12911239
"duration": 200,
12921240
"keyspacelen": 10000,
1293-
"clients": 1000,
1241+
"clients": 500,
12941242
"sequential": true,
12951243
"command": "HSET rd0-{tag}:__rand_int__ field1 \"__field:field1__\"",
12961244
"profiling": {
@@ -1332,31 +1280,6 @@
13321280
"group": 14,
13331281
"description": "Misc Tests - String intern, schema options, extreme case",
13341282
"scenarios": [
1335-
{
1336-
"id": "a",
1337-
"type": "write",
1338-
"cluster_execution": "single",
1339-
"description": "100K docs × 1 field × 100 tokens - 128-char key names",
1340-
"setup_commands": [
1341-
"FT.CREATE rd0 ON HASH SCHEMA field1 TEXT"
1342-
],
1343-
"flush_before": true,
1344-
"dataset": "datasets/wiki_100k_1field_100tok_uuidkey.xml",
1345-
"xml_root_element": "doc",
1346-
"duration": 200,
1347-
"keyspacelen": 100000,
1348-
"clients": 1000,
1349-
"sequential": true,
1350-
"command": "HSET __field:keyname__ field1 \"__field:field1__\"",
1351-
"profiling": {
1352-
"delays": {
1353-
"write": {
1354-
"delay": 5,
1355-
"duration": 10
1356-
}
1357-
}
1358-
}
1359-
},
13601283
{
13611284
"id": "b",
13621285
"type": "write",
@@ -1382,31 +1305,6 @@
13821305
}
13831306
}
13841307
},
1385-
{
1386-
"id": "c",
1387-
"type": "write",
1388-
"cluster_execution": "single",
1389-
"description": "50K docs × 10 fields × 100 tokens - WITHSUFFIXTRIE",
1390-
"setup_commands": [
1391-
"FT.CREATE rd0 ON HASH PREFIX 1 rd0- SCHEMA field1 TEXT WITHSUFFIXTRIE field2 TEXT WITHSUFFIXTRIE field3 TEXT WITHSUFFIXTRIE field4 TEXT WITHSUFFIXTRIE field5 TEXT WITHSUFFIXTRIE field6 TEXT WITHSUFFIXTRIE field7 TEXT WITHSUFFIXTRIE field8 TEXT WITHSUFFIXTRIE field9 TEXT WITHSUFFIXTRIE field10 TEXT WITHSUFFIXTRIE"
1392-
],
1393-
"flush_before": true,
1394-
"dataset": "datasets/wiki_50k_10field_100tok.xml",
1395-
"xml_root_element": "doc",
1396-
"duration": 200,
1397-
"keyspacelen": 50000,
1398-
"clients": 1000,
1399-
"sequential": true,
1400-
"command": "HSET rd0-{tag}:__rand_int__ field1 \"__field:field1__\" field2 \"__field:field2__\" field3 \"__field:field3__\" field4 \"__field:field4__\" field5 \"__field:field5__\" field6 \"__field:field6__\" field7 \"__field:field7__\" field8 \"__field:field8__\" field9 \"__field:field9__\" field10 \"__field:field10__\"",
1401-
"profiling": {
1402-
"delays": {
1403-
"write": {
1404-
"delay": 5,
1405-
"duration": 10
1406-
}
1407-
}
1408-
}
1409-
},
14101308
{
14111309
"id": "d",
14121310
"type": "write",
@@ -1432,31 +1330,6 @@
14321330
}
14331331
}
14341332
},
1435-
{
1436-
"id": "e",
1437-
"type": "write",
1438-
"cluster_execution": "single",
1439-
"description": "50K docs × 10 fields × 100 tokens - STOPWORDS 0",
1440-
"setup_commands": [
1441-
"FT.CREATE rd0 ON HASH PREFIX 1 rd0- STOPWORDS 0 SCHEMA field1 TEXT field2 TEXT field3 TEXT field4 TEXT field5 TEXT field6 TEXT field7 TEXT field8 TEXT field9 TEXT field10 TEXT"
1442-
],
1443-
"flush_before": true,
1444-
"dataset": "datasets/wiki_50k_10field_100tok.xml",
1445-
"xml_root_element": "doc",
1446-
"duration": 200,
1447-
"keyspacelen": 50000,
1448-
"clients": 1000,
1449-
"sequential": true,
1450-
"command": "HSET rd0-{tag}:__rand_int__ field1 \"__field:field1__\" field2 \"__field:field2__\" field3 \"__field:field3__\" field4 \"__field:field4__\" field5 \"__field:field5__\" field6 \"__field:field6__\" field7 \"__field:field7__\" field8 \"__field:field8__\" field9 \"__field:field9__\" field10 \"__field:field10__\"",
1451-
"profiling": {
1452-
"delays": {
1453-
"write": {
1454-
"delay": 5,
1455-
"duration": 10
1456-
}
1457-
}
1458-
}
1459-
},
14601333
{
14611334
"id": "f",
14621335
"type": "write",
@@ -1555,6 +1428,41 @@
15551428
"command": "FT.SEARCH rd0 \"(@price:[100 500])=>[KNN 10 @embedding $vec AS score]\" PARAMS 2 vec \"__field:query_vector__\" RETURN 2 price score DIALECT 2"
15561429
}
15571430
]
1431+
},
1432+
{
1433+
"group": 16,
1434+
"description": "Composed Queries - Nominal Case (entries fetcher test)",
1435+
"scenarios": [
1436+
{
1437+
"id": "a",
1438+
"type": "write",
1439+
"cluster_execution": "single",
1440+
"setup_commands": [
1441+
"FT.CREATE rd0 ON HASH PREFIX 1 rd0- SCHEMA field1 TEXT field2 TEXT"
1442+
],
1443+
"flush_before": true,
1444+
"dataset": "datasets/proximity_phrases.csv",
1445+
"maxdocs": 50000,
1446+
"clients": 1000,
1447+
"sequential": true,
1448+
"command": "HSET rd0-{tag}:__rand_int__ field1 \"__field:term1__\" field2 \"__field:term2__\""
1449+
},
1450+
{
1451+
"id": "b",
1452+
"type": "read",
1453+
"cluster_execution": "parallel",
1454+
"description": "TEXT + TEXT cross-field composed (entries fetcher optimization test)",
1455+
"dataset": "datasets/proximity_phrases.csv",
1456+
"clients": 1000,
1457+
"duration": 200,
1458+
"warmup": 60,
1459+
"command": "FT.SEARCH rd0 \"@field1:__field:term1__ @field2:__field:term2__\"",
1460+
"options": {
1461+
"": "",
1462+
"NOCONTENT": "_nocontent"
1463+
}
1464+
}
1465+
]
15581466
}
15591467
],
15601468
"port": 6379,
@@ -1621,4 +1529,4 @@
16211529
}
16221530
]
16231531
}
1624-
]
1532+
]

docs/topics/search-configurables.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ The search module uses the Valkey configuration mechanism. Thus each of the name
2828
| search.local-fanout-queue-wait-threshold | Number | | Queue wait threshold in milliseconds for preferring local node in fanout operations |
2929
| search.thread-pool-wait-time-samples | Number | | Sample queue size for thread pool wait time tracking |
3030
| search.max-term-expansions | Number | | Maximum number of words to search in text operations (prefix, suffix, fuzzy) to limit memory usage |
31+
| search.tag-min-prefix-length | Number | | Minimum number of characters required before trailing `*` in TAG wildcard queries (length excludes `*`) |
3132
| search.search-result-buffer-multiplier | String | | Multiplier for search result buffer size allocation |
3233
| search.drain-mutation-queue-on-save | Boolean | | Drain the mutation queue before RDB save |
3334
| search.query-string-depth | Number | | Controls the depth of the query string parsing from the FT.SEARCH cmd |

0 commit comments

Comments
 (0)