@@ -5,6 +5,7 @@ package integration
 
 import (
 	"context"
+	"fmt"
 	"math/rand"
 	"os"
 	"path"
@@ -35,6 +36,163 @@ import (
 	"github.com/cortexproject/cortex/pkg/util/log"
 )
 
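+// TestDisableChunkTrimmingFuzz runs the same randomly generated range queries against the
+// image under test and against the released v1.18.0 image, and fails if the two replicas
+// return different results or different errors for any query.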
+func TestDisableChunkTrimmingFuzz(t *testing.T) {
+	noneChunkTrimmingImage := "quay.io/cortexproject/cortex:v1.18.0"
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	consul1 := e2edb.NewConsulWithName("consul1")
+	consul2 := e2edb.NewConsulWithName("consul2")
+	require.NoError(t, s.StartAndWaitReady(consul1, consul2))
+
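+	// The two replicas share the same configuration; only the Consul instance and, later,
+	// the filesystem storage directory differ.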
+	flags1 := mergeFlags(
+		AlertmanagerLocalFlags(),
+		map[string]string{
+			"-store.engine": blocksStorageEngine,
+			"-blocks-storage.backend": "filesystem",
+			"-blocks-storage.tsdb.head-compaction-interval": "4m",
+			"-blocks-storage.tsdb.block-ranges-period": "2h",
+			"-blocks-storage.tsdb.ship-interval": "1h",
+			"-blocks-storage.bucket-store.sync-interval": "15m",
+			"-blocks-storage.tsdb.retention-period": "2h",
+			"-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory,
+			"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+			"-querier.query-store-for-labels-enabled": "true",
+			// Ingester.
+			"-ring.store": "consul",
+			"-consul.hostname": consul1.NetworkHTTPEndpoint(),
+			// Distributor.
+			"-distributor.replication-factor": "1",
+			// Store-gateway.
+			"-store-gateway.sharding-enabled": "false",
+			// alert manager
+			"-alertmanager.web.external-url": "http://localhost/alertmanager",
+		},
+	)
+	flags2 := mergeFlags(
+		AlertmanagerLocalFlags(),
+		map[string]string{
+			"-store.engine": blocksStorageEngine,
+			"-blocks-storage.backend": "filesystem",
+			"-blocks-storage.tsdb.head-compaction-interval": "4m",
+			"-blocks-storage.tsdb.block-ranges-period": "2h",
+			"-blocks-storage.tsdb.ship-interval": "1h",
+			"-blocks-storage.bucket-store.sync-interval": "15m",
+			"-blocks-storage.tsdb.retention-period": "2h",
+			"-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory,
+			"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+			"-querier.query-store-for-labels-enabled": "true",
+			// Ingester.
+			"-ring.store": "consul",
+			"-consul.hostname": consul2.NetworkHTTPEndpoint(),
+			// Distributor.
+			"-distributor.replication-factor": "1",
+			// Store-gateway.
+			"-store-gateway.sharding-enabled": "false",
+			// alert manager
+			"-alertmanager.web.external-url": "http://localhost/alertmanager",
+		},
+	)
+	// make alert manager config dir
+	require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{}))
+
+	path1 := path.Join(s.SharedDir(), "cortex-1")
+	path2 := path.Join(s.SharedDir(), "cortex-2")
+
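+	// Give each replica its own filesystem bucket so the two instances never share blocks.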
+	flags1 = mergeFlags(flags1, map[string]string{"-blocks-storage.filesystem.dir": path1})
+	flags2 = mergeFlags(flags2, map[string]string{"-blocks-storage.filesystem.dir": path2})
+	// Start Cortex replicas.
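+	// cortex-1 runs the default image for this test run, while cortex-2 runs the pinned
+	// v1.18.0 release declared above.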
+	cortex1 := e2ecortex.NewSingleBinary("cortex-1", flags1, "")
+	cortex2 := e2ecortex.NewSingleBinary("cortex-2", flags2, noneChunkTrimmingImage)
+	require.NoError(t, s.StartAndWaitReady(cortex1, cortex2))
+
+	// Wait until Cortex replicas have updated the ring state.
+	require.NoError(t, cortex1.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total"))
+	require.NoError(t, cortex2.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total"))
+
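+	// One client per replica; both read and write as tenant "user-1".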
+	c1, err := e2ecortex.NewClient(cortex1.HTTPEndpoint(), cortex1.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+	c2, err := e2ecortex.NewClient(cortex2.HTTPEndpoint(), cortex2.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+
+	now := time.Now()
+	// Push some series to Cortex.
+	start := now.Add(-time.Minute * 120)
+	scrapeInterval := 30 * time.Second
+
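+	// Generate 10 series with 240 samples each at a 30s scrape interval, covering the last two hours.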
+	numSeries := 10
+	numSamples := 240
+	serieses := make([]prompb.TimeSeries, numSeries)
+	lbls := make([]labels.Labels, numSeries)
+	for i := 0; i < numSeries; i++ {
+		series := e2e.GenerateSeriesWithSamples(fmt.Sprintf("test_series_%d", i), start, scrapeInterval, i*numSamples, numSamples, prompb.Label{Name: "foo", Value: "bar"})
+		serieses[i] = series
+
+		builder := labels.NewBuilder(labels.EmptyLabels())
+		for _, lbl := range series.Labels {
+			builder.Set(lbl.Name, lbl.Value)
+		}
+		lbls[i] = builder.Labels()
+	}
+
+	res, err := c1.Push(serieses)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	res, err = c2.Push(serieses)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
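+	// Seed promqlsmith with the pushed label sets so generated queries match real series,
+	// and allow offset and @ modifiers in the generated expressions.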
+	rnd := rand.New(rand.NewSource(now.Unix()))
+	opts := []promqlsmith.Option{
+		promqlsmith.WithEnableOffset(true),
+		promqlsmith.WithEnableAtModifier(true),
+	}
+	ps := promqlsmith.New(rnd, lbls, opts...)
+
+	type testCase struct {
+		query string
+		res1, res2 model.Value
+		err1, err2 error
+	}
+
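+	// Query a 20-minute window that sits well inside the pushed sample range.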
+	queryStart := time.Now().Add(-time.Minute * 40)
+	queryEnd := time.Now().Add(-time.Minute * 20)
+	cases := make([]*testCase, 0, 200)
+	for i := 0; i < 200; i++ {
+		expr := ps.WalkRangeQuery()
+		query := expr.Pretty(0)
+		res1, err1 := c1.QueryRange(query, queryStart, queryEnd, scrapeInterval)
+		res2, err2 := c2.QueryRange(query, queryStart, queryEnd, scrapeInterval)
+		cases = append(cases, &testCase{
+			query: query,
+			res1:  res1,
+			res2:  res2,
+			err1:  err1,
+			err2:  err2,
+		})
+	}
+
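+	// Compare the two replicas case by case: a mismatch in either the returned error or the
+	// decoded result counts as a failure, and failures are reported together at the end.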
+	failures := 0
+	for i, tc := range cases {
+		qt := "range query"
+		if tc.err1 != nil || tc.err2 != nil {
+			if !cmp.Equal(tc.err1, tc.err2) {
+				t.Logf("case %d error mismatch.\n%s: %s\nerr1: %v\nerr2: %v\n", i, qt, tc.query, tc.err1, tc.err2)
+				failures++
+			}
+		} else if !cmp.Equal(tc.res1, tc.res2, comparer) {
+			t.Logf("case %d results mismatch.\n%s: %s\nres1: %s\nres2: %s\n", i, qt, tc.query, tc.res1.String(), tc.res2.String())
+			failures++
+		}
+	}
+	if failures > 0 {
+		require.Failf(t, "finished query fuzzing tests", "%d test cases failed", failures)
+	}
+}
+
 func TestVerticalShardingFuzz(t *testing.T) {
 	s, err := e2e.NewScenario(networkName)
 	require.NoError(t, err)
@@ -159,7 +317,6 @@ func TestVerticalShardingFuzz(t *testing.T) {
 		instantQuery bool
 	}
 
-	now = time.Now()
 	cases := make([]*testCase, 0, 200)
 	for i := 0; i < 100; i++ {
 		expr := ps.WalkInstantQuery()