@@ -13,6 +13,12 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+// size of the channel used for the visitor, defined in goka/config.go
+var (
+	visitChannelSize = 100
+	numPartitions    = 10
+)
+
 // TestProcessorVisit tests the visiting functionality.
 func TestProcessorVisit(t *testing.T) {
 	brokers := initSystemTest(t)
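
The added vars mirror the visit-channel capacity that goka configures internally (per the comment, goka/config.go is the source of truth). The regression scenarios below hinge on a basic Go property: a send to a buffered channel blocks once the buffer is full. A minimal, self-contained sketch of that mechanism; the channel name is illustrative, not goka's actual internals:

package main

import "fmt"

func main() {
	// tiny stand-in for the partition processor's visit-channel
	visitInput := make(chan string, 2)

	visitInput <- "key-0"
	visitInput <- "key-1"

	// A third plain send would block here until a receiver drains the
	// channel; that is where goka's table iterator can get stuck if the
	// consuming side dies first (see the issue #433 regression below).
	select {
	case visitInput <- "key-2":
		fmt.Println("sent")
	default:
		fmt.Println("buffer full, sender would block")
	}
}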
@@ -34,7 +40,7 @@ func TestProcessorVisit(t *testing.T) {
 	}
 
 	createEmitter := func(topic goka.Stream) (*goka.Emitter, func()) {
-		err = tm.EnsureStreamExists(string(topic), 10)
+		err = tm.EnsureStreamExists(string(topic), numPartitions)
 		require.NoError(t, err)
 
 		em, err := goka.NewEmitter(brokers, topic, new(codec.Int64),
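
The tm handle used here comes from earlier setup that this diff does not show; it is presumably created along these lines (the exact config plumbing is an assumption based on goka's public API, not part of the commit):

// hypothetical setup for the topic-manager handle used by the test
tm, err := goka.NewTopicManager(brokers, goka.DefaultConfig(), goka.NewTopicManagerConfig())
require.NoError(t, err)
defer func() { _ = tm.Close() }()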
@@ -90,7 +96,7 @@ func TestProcessorVisit(t *testing.T) {
 
 		pollTimed(t, "recovered", proc.Recovered)
 
-		em.EmitSync("value1", int64(1))
+		_ = em.EmitSync("value1", int64(1))
 
 		pollTimed(t, "value-ok", func() bool {
 			val1, _ := proc.Get("value1")
@@ -114,7 +120,7 @@ func TestProcessorVisit(t *testing.T) {
 
 		pollTimed(t, "recovered", proc.Recovered)
 
-		em.EmitSync("value1", int64(1))
+		_ = em.EmitSync("value1", int64(1))
 
 		pollTimed(t, "value-ok", func() bool {
 			val1, _ := proc.Get("value1")
@@ -128,6 +134,90 @@ func TestProcessorVisit(t *testing.T) {
 		require.Error(t, <-done)
 	})
 
+	// Tests that a panic while visiting is handled even while the iterator
+	// is still pushing messages into the partition processor's visit-channel.
+	// Regression test for https://github.com/lovoo/goka/issues/433
+	t.Run("visit-panic-slow", func(t *testing.T) {
+		group, input := nextTopics()
+		em, finish := createEmitter(input)
+		defer finish()
+		proc, cancel, done := runProc(createProc(group, input, 500*time.Millisecond))
+
+		pollTimed(t, "recovered", proc.Recovered)
+
+		// create twice as many items in the table as the visit-channel's size.
+		// This way we can make sure that the visitor will have to block on
+		// pushing items to the partition processor's visitInputChannel.
+		numMsgs := visitChannelSize * numPartitions * 2
+		for i := 0; i < numMsgs; i++ {
+			_, _ = em.Emit(fmt.Sprintf("value-%d", i), int64(1))
+		}
+
+		// wait for all messages to have propagated
+		pollTimed(t, "value-ok", func() bool {
+			val1, _ := proc.Get(fmt.Sprintf("value-%d", numMsgs-1))
+			return val1 != nil && val1.(int64) == 1
+		})
+
+		// pass a meta value of the wrong type to the visitor; it is forwarded
+		// to the visit callback and will make it panic
+		require.Error(t, proc.VisitAll(context.Background(), "visitor", "asdf"))
+
+		// no need to cancel, the failing VisitAll will kill the processor.
+		_ = cancel
+		require.Error(t, <-done)
+	})
+
+	// Verifies a visit is gracefully shut down when the processor is canceled
+	// while the visit is running.
+	t.Run("visit-shutdown-slow", func(t *testing.T) {
+		group, input := nextTopics()
+		em, finish := createEmitter(input)
+		defer finish()
+		proc, cancel, done := runProc(createProc(group, input, 1*time.Second))
+
+		pollTimed(t, "recovered", proc.Recovered)
+
+		// create twice as many items in the table as the visit-channel's size.
+		// This way we can make sure that the visitor will have to block on
+		// pushing items to the partition processor's visitInputChannel.
+		numMsgs := visitChannelSize * numPartitions * 2
+		for i := 0; i < numMsgs; i++ {
+			_, _ = em.Emit(fmt.Sprintf("value-%d", i), int64(1))
+		}
+
+		// wait for all messages to have propagated
+		pollTimed(t, "value-ok", func() bool {
+			val1, _ := proc.Get(fmt.Sprintf("value-%d", numMsgs-1))
+			return val1 != nil && val1.(int64) == 1
+		})
+
+		visitCtx, visitCancel := context.WithCancel(context.Background())
+		defer visitCancel()
+
+		var (
+			visitErr  error
+			visitDone = make(chan struct{})
+		)
+
+		// start the visitor
+		go func() {
+			defer close(visitDone)
+			visitErr = proc.VisitAll(visitCtx, "visitor", int64(25))
+		}()
+
+		// wait half of the processor's per-message delay, so we cancel mid-visit
+		time.Sleep(500 * time.Millisecond)
+		// stop the visit
+		visitCancel()
+
+		// wait until visiting is done
+		<-visitDone
+		require.ErrorContains(t, visitErr, "canceled")
+
+		cancel()
+		require.NoError(t, <-done)
+	})
+
 	t.Run("visit-shutdown", func(t *testing.T) {
 		group, input := nextTopics()
 		em, finish := createEmitter(input)
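
With the values introduced at the top of the file, numMsgs works out to 100 * 10 * 2 = 2000 keys, enough to keep every partition's visit-channel saturated while the visitor sleeps. Both new subtests also lean on the createProc helper defined elsewhere in this file; its body is not part of the diff, but for the panic and the timing to work it plausibly registers a visitor callback that sleeps for the given duration and type-asserts its meta argument. A sketch under those assumptions, not the actual helper:

createProc := func(group goka.Group, input goka.Stream, visitSleep time.Duration) *goka.Processor {
	proc, err := goka.NewProcessor(brokers,
		goka.DefineGroup(group,
			goka.Input(input, new(codec.Int64), func(ctx goka.Context, msg interface{}) {
				ctx.SetValue(msg)
			}),
			// the visitor sleeps per visited key; the assumed type assertion
			// is what panics in visit-panic-slow when "asdf" arrives as meta
			goka.Visitor("visitor", func(ctx goka.Context, meta interface{}) {
				time.Sleep(visitSleep)
				ctx.SetValue(meta.(int64))
			}),
			goka.Persist(new(codec.Int64)),
		),
	)
	require.NoError(t, err)
	return proc
}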
@@ -138,8 +228,8 @@ func TestProcessorVisit(t *testing.T) {
 
 		// emit two values where goka.DefaultHasher says they're in the same partition.
 		// We need to achieve this to test that a shutdown will visit one value but not the other
-		em.EmitSync("0", int64(1))
-		em.EmitSync("02", int64(1))
+		_ = em.EmitSync("0", int64(1))
+		_ = em.EmitSync("02", int64(1))
 
 		pollTimed(t, "value-ok", func() bool {
 			val1, _ := proc.Get("02")
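
The same-partition claim for "0" and "02" can be spot-checked in isolation: goka's DefaultHasher is 32-bit FNV-1a, and assuming the usual hash-modulo assignment over the 10 partitions used here, a standalone probe looks like this (the modulo arithmetic is an assumption about the default partitioner, not something this diff shows):

package main

import (
	"fmt"
	"hash/fnv"
)

// partition mimics hash-modulo assignment of a key to one of n partitions.
func partition(key string, n int32) int32 {
	h := fnv.New32a()
	h.Write([]byte(key))
	p := int32(h.Sum32()) % n
	if p < 0 {
		p = -p
	}
	return p
}

func main() {
	// per the test comment, both keys should print the same partition
	fmt.Println(partition("0", 10), partition("02", 10))
}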
@@ -196,7 +286,7 @@ func TestProcessorVisit(t *testing.T) {
 		defer emFinish()
 		// create the group table manually, otherwise the proc and the view are racing
 
-		tm.EnsureTableExists(string(goka.GroupTable(group)), 10)
+		_ = tm.EnsureTableExists(string(goka.GroupTable(group)), 10)
 		// scenario: sleep in visit, processor shuts down --> visit should cancel too
 		proc, cancel, done := runProc(createProc(group, input, 500*time.Millisecond))
 		view, viewCancel, viewDone := runView(createView(group))
@@ -207,7 +297,7 @@ func TestProcessorVisit(t *testing.T) {
 		// emit two values where goka.DefaultHasher says they're in the same partition.
 		// We need to achieve this to test that a shutdown will visit one value but not the other
 		for i := 0; i < 100; i++ {
-			em.Emit(fmt.Sprintf("value-%d", i), int64(1))
+			_, _ = em.Emit(fmt.Sprintf("value-%d", i), int64(1))
 		}
 		// emFinish()
 
@@ -251,7 +341,7 @@ func TestProcessorVisit(t *testing.T) {
 		em, finish := createEmitter(input)
 		defer finish()
 		// create the group table manually, otherwise the proc and the view are racing
-		tm.EnsureTableExists(string(goka.GroupTable(group)), 10)
+		_ = tm.EnsureTableExists(string(goka.GroupTable(group)), 10)
 		// scenario: sleep in visit, processor shuts down --> visit should cancel too
 		proc1, cancel1, done1 := runProc(createProc(group, input, 500*time.Millisecond))
 
@@ -260,7 +350,7 @@ func TestProcessorVisit(t *testing.T) {
 		// emit two values where goka.DefaultHasher says they're in the same partition.
 		// We need to achieve this to test that a shutdown will visit one value but not the other
 		for i := 0; i < 100; i++ {
-			em.Emit(fmt.Sprintf("value-%d", i), int64(1))
+			_, _ = em.Emit(fmt.Sprintf("value-%d", i), int64(1))
 		}
 
 		// poll until all values are there
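
pollTimed(t, name, cond) appears throughout the diff; it lives elsewhere in the systemtest package and simply polls a condition until a deadline. A minimal sketch of the contract these tests rely on, with the package name, interval, and timeout as assumptions:

package systemtest

import (
	"testing"
	"time"
)

// pollTimed retries poll until it returns true or a deadline passes.
func pollTimed(t *testing.T, what string, poll func() bool) {
	t.Helper()
	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		if poll() {
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
	t.Fatalf("condition %q not satisfied in time", what)
}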