ImageProcessor.py
#!/usr/bin/env python
# coding: Latin-1
# Load library functions we want
import time
import datetime
import threading
import cv2
import numpy
import math
import random
import Globals
import Settings # track, camera and control constants referenced throughout this module
# Mode values
READY_TO_RACE = 1
WAIT_FOR_LIGHTS = 2
FOLLOW_TRACK = 3
CRASHED = 4
FLIPPED = 5
WRONG_WAY = 6
RACE_OVER = 7
FIRST_STRAIGHT = 8
# Light sequence
READY_OFF = 1
FIRST_GREEN = 2
SECOND_RED = 3
THIRD_GREEN_GO = 4
# Logging levels
LOG_CRITICAL = 1
LOG_MAJOR = 2
LOG_MINOR = 3
# Mappings for output
lineIndexToName = {
0 : 'wall | red',
1 : 'red | blue',
2 : 'blue | red',
3 : 'red | green',
4 : 'green | blue',
5 : 'blue | green',
6 : 'green | wall'
}
lineIndexToColour = {
0 : (255, 0, 0),
1 : (255, 0, 127),
2 : (127, 0, 255),
3 : (255, 255, 0),
4 : ( 0, 127, 255),
5 : ( 0, 255, 127),
6 : ( 0, 255, 0),
7 : ( 0, 0, 0)
}
lineIndexToOffset = {
0 : +3.0,
1 : +2.0,
2 : +1.0,
3 : 0.0,
4 : -1.0,
5 : -2.0,
6 : -3.0
}
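# The three tables above describe the seven colour boundaries the processor can
# detect, from wall | red (index 0) across to green | wall (index 6).  Each index
# maps to a human-readable name, a display colour for debug plots, and a lane
# offset measured in lane widths, with the central red | green boundary (index 3)
# at offset 0.0.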
def rgb2bgr((r, g, b)):
return b, g, r
def SetImageMode(newMode):
Globals.lastImageMode = Globals.imageMode
Globals.imageMode = newMode
def FullTimeStamp():
stamp = datetime.datetime.now()
return stamp.strftime('%Y %m %d - %H-%M-%S')
def TimeOnlyStamp():
stamp = datetime.datetime.now()
return stamp.strftime('%H:%M:%S.%f')
def LogData(logLevel, logString):
logLine = '%s %s\n' % (TimeOnlyStamp(), logString)
if logLevel <= Globals.processingPrintLogLevel:
print logLine
if Globals.processingLogFile:
if logLevel <= Globals.processingWriteLogLevel:
Globals.processingLogFile.write(logLine)
# PID processing thread
class ControlLoop(threading.Thread):
def __init__(self, autoRun = True):
super(ControlLoop, self).__init__()
self.event = threading.Event()
self.lock = threading.Lock()
self.terminated = False
self.eventWait = 2.0 / Settings.frameRate
self.userTargetLane = 0.0
self.lastStartMarker = time.time()
self.Reset()
if autoRun:
LogData(LOG_CRITICAL, 'Control loop thread started with idle time of %.2fs' % (self.eventWait))
self.start()
else:
LogData(LOG_CRITICAL, 'Control loop loaded and waiting for commands')
def run(self):
# This method runs in a separate thread
while not self.terminated:
# Wait for an image to be written to the stream
self.event.wait(self.eventWait)
if self.event.isSet():
if self.terminated:
break
try:
# Read the next set of values
sample = self.nextSample
self.RunLoop(sample)
self.lastSample = sample
finally:
# Reset the event trigger
self.event.clear()
LogData(LOG_CRITICAL, 'Control loop thread terminated')
def Reset(self):
with self.lock:
self.__Reset__()
def __Reset__(self):
# Set everything to a clean starting state
self.moving = False
self.i0 = 0.0
self.i1 = 0.0
self.i2 = 0.0
self.lastD0 = 0.0
self.lastD1 = 0.0
self.lastD2 = 0.0
self.clipMax = Settings.clipI
self.clipMin = -Settings.clipI
self.lastSample = (0.0, 0.0, 0.0, 0.0)
self.firTaps = Settings.firTaps
self.firHistorySpeed = []
self.firHistorySteering = []
self.autoTargetLane = 0.0
self.stuckFrameCount = 0
self.overrideStuckTicks = 0
self.firstStraightTicks = None
self.wrongWayCount = 0
self.wrongWayTicks = 0
self.huntColours = (False, False, False)
self.huntLeft = False
self.flippedImageCount = 0
self.stuckSpeed = -1.0
self.overtaking = False
self.overtakeRemainingTicks = 0
self.overtakeBrakingTicks = 0
self.unknownPointCount = 0
self.unknownPointAverage = 0.5
self.lastSpeed = 0.0
self.lastSteering = 0.0
self.accumulateDistance = False
self.SetDrive(0.0, 0.0)
def SetDrive(self, speed, steering):
# Make sure speed and steering are within limits
if steering < -1.0:
steering = -1.0
elif steering > 1.0:
steering = 1.0
if speed < -1.0:
speed = -1.0
elif speed > 1.0:
speed = 1.0
# Final steering corrections
steering *= Settings.steeringGain
steering += Settings.steeringOffset
if steering < -Settings.steeringClip:
steering = -Settings.steeringClip
elif steering > Settings.steeringClip:
steering = Settings.steeringClip
# Determine the individual drive power levels
driveLeft = speed
driveRight = speed
if steering < -0.01:
# Turning left
driveLeft *= 1.0 + steering
elif steering > 0.01:
# Turning right
driveRight *= 1.0 - steering
# Set the motors to the new speeds
Globals.MonsterMotors(driveLeft, driveRight)
if (Globals.frameAnnounce == 0):
LogData(LOG_MINOR, 'Motors: %+.3f, %+.3f' % (driveLeft, driveRight))
# Calculate the travelled distance between the last two frames
self.IncreaseDistance()
self.lastSpeed = speed
self.lastSteering = steering
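# Steering is applied differentially: a negative (left) value scales the left
# motor down and a positive (right) value scales the right motor down.  For
# example (ignoring the gain, offset and clip corrections above), speed = 1.0
# with steering = -0.5 gives driveLeft = 0.5 and driveRight = 1.0, turning left.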
def IncreaseDistance(self):
if self.accumulateDistance and (self.lastSpeed != 0):
# Work out motor speed based on steering and power applied
turningSpeed = abs(self.lastSteering)
straightSpeed = 1.0 - turningSpeed
calculatedSpeed = (turningSpeed * Settings.monsterSpeedFullSteering)
calculatedSpeed += (straightSpeed * Settings.monsterSpeed)
if calculatedSpeed < 0.0:
calculatedSpeed = 0.0
calculatedSpeed *= self.lastSpeed
# Work out the distance along the track axis based on the angle of travel
hyp = calculatedSpeed * Settings.monsterDistancePerFrame
opp = self.changeD0 * Settings.laneWidth
hyp2 = hyp ** 2
opp2 = opp ** 2
if hyp2 >= opp2:
adj = math.sqrt(hyp2 - opp2)
else:
# Offset change is too sharp, larger than the speed!
# This usually indicates being knocked or processing errors
adj = 0.0
# Work out the speed correction for the 'lane' over the last couple of frames
laneLength = (self.distanceD0 * Settings.trackChangePerLane) + Settings.trackLengthCenter
laneSpeedCorrection = Settings.trackLengthCenter / laneLength
# Apply the speed correction to the calculated distance along the track
trackDistanceTravelled = adj * laneSpeedCorrection
# Add the distance travelled to the distance so far
Globals.lapTravelled += trackDistanceTravelled
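# The distance estimate treats the per-frame travel as the hypotenuse of a
# right triangle: the sideways shift between lanes (changeD0 * laneWidth) is the
# opposite side, so the along-track progress is sqrt(hyp^2 - opp^2).  That
# progress is scaled by trackLengthCenter / laneLength so laps driven in an
# outer (longer) lane accumulate distance at the same rate as the centre line.
# The monsterSpeed* constants come from Settings and are presumed to be
# calibrated distance-per-frame values at full power.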
def FirFilter(self, speed, steering):
# Filtering for speed and steering
self.firHistorySpeed.append(speed)
self.firHistorySteering.append(steering)
self.firHistorySpeed = self.firHistorySpeed[-self.firTaps:]
self.firHistorySteering = self.firHistorySteering[-self.firTaps:]
filteredSpeed = numpy.mean(self.firHistorySpeed)
filteredSteering = numpy.mean(self.firHistorySteering)
# Run any override conditions
filteredSpeed, filteredSteering = self.PerformOverrides(filteredSpeed, filteredSteering)
self.SetDrive(filteredSpeed, filteredSteering)
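# The FIR filter is a simple moving average over the last firTaps speed and
# steering values.  It smooths frame-to-frame noise from the image processing
# at the cost of roughly firTaps / frameRate seconds of extra control lag.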
def PerformOverrides(self, filteredSpeed, filteredSteering):
# Default to keeping the filtered values
overrideSpeed = filteredSpeed
overrideSteering = filteredSteering
checkForOverrides = False
calculateDistance = True
# Check for existing override conditions
if Globals.imageMode == READY_TO_RACE:
# Waiting for commands, set motors off
overrideSpeed = 0.0
elif Globals.imageMode == WAIT_FOR_LIGHTS:
# Waiting for the lights sequence, set motors off
overrideSpeed = 0.0
elif Globals.imageMode == FOLLOW_TRACK:
# Normal driving, proceed to standard code
checkForOverrides = True
elif Globals.imageMode == CRASHED:
# Running the reversing procedure for being stuck or lost
calculateDistance = False
if self.overrideStuckTicks < Settings.stuckOverrideFrames:
# Keep reversing at full speed
overrideSteering = 0.0
overrideSpeed = self.stuckSpeed
# Check for left or right hunt
if self.huntColours[0] > 0:
# Red, turn left
self.huntLeft = True
elif self.huntColours[1] > 0:
# Green, turn right
self.huntLeft = False
self.overrideStuckTicks += 1
elif self.overrideStuckTicks < (Settings.stuckOverrideFrames + Settings.stuckHuntFrames):
# Move forward at an angle to try and re-catch the track
overrideSpeed = -self.stuckSpeed
if self.huntLeft:
overrideSteering = +1.0
else:
overrideSteering = -1.0
if overrideSpeed < 0.0:
overrideSteering *= -1.0
self.overrideStuckTicks += 1
else:
# We have finished hunting, stop and reset
LogData(LOG_MAJOR, '< END OVERRIDE: STUCK >')
overrideSpeed = 0.0
self.__Reset__()
SetImageMode(Globals.lastImageMode)
elif Globals.imageMode == FLIPPED:
# Flipped over, invert control
overrideSpeed = -overrideSpeed
overrideSteering = -overrideSteering
checkForOverrides = True
elif Globals.imageMode == WRONG_WAY:
# Driving the wrong way, we need to spin around
calculateDistance = False
if self.wrongWayTicks < Settings.wrongWaySpinFrames:
overrideSpeed = 1.0
overrideSteering = +1.0
self.wrongWayTicks += 1
else:
LogData(LOG_MAJOR, '< END OVERRIDE: WRONG WAY >')
self.wrongWayCount = 0
overrideSpeed = 0.0
self.__Reset__()
SetImageMode(Globals.lastImageMode)
elif Globals.imageMode == RACE_OVER:
# Done racing, set motors off
overrideSpeed = 0.0
elif Globals.imageMode == FIRST_STRAIGHT:
# Initial straight, full speed forward :)
if self.firstStraightTicks is None:
# Pick a time between the limits
self.firstStraightTicks = random.random() * Settings.frameRate
self.firstStraightTicks *= (Settings.firstStraightMax - Settings.firstStraightMin)
self.firstStraightTicks += Settings.firstStraightMin
if self.firstStraightTicks > 0:
# Override control with straight forward
overrideSpeed = Globals.userSpeed
overrideSteering = 0.0
self.firstStraightTicks -= 1
else:
# Completed our time, return to normal control
SetImageMode(FOLLOW_TRACK)
if checkForOverrides:
# Check for new override conditions
if self.stuckFrameCount >= Settings.stuckIdenticalFrames:
# We are stuck or lost, engage the stuck override
LogData(LOG_MAJOR, '< START OVERRIDE: STUCK >')
calculateDistance = False
Globals.lapTravelled -= Settings.stuckDetectedDistanceCorrection
self.overrideStuckTicks = 0
if Globals.imageMode == FLIPPED:
self.stuckSpeed = +1.0
else:
self.stuckSpeed = -1.0
SetImageMode(CRASHED)
overrideSpeed, overrideSteering = self.PerformOverrides(overrideSpeed, overrideSteering)
elif self.wrongWayCount > Settings.wrongWayThreshold:
# We seem to be facing the wrong way
LogData(LOG_MAJOR, '< START OVERRIDE: WRONG WAY >')
calculateDistance = False
self.wrongWayTicks = 0
SetImageMode(WRONG_WAY)
overrideSpeed = 0.0
overrideSteering = 0.0
elif (not self.overtaking) and (self.unknownPointCount >= Settings.overtakeThreshold):
# There may be a car in front, try and overtake
LogData(LOG_MAJOR, '< START OVERRIDE: OVERTAKE >')
if self.unknownPointAverage < 0.5:
# Robot to the left, overtake to the right
self.autoTargetLane = -Settings.overtakeLaneOffset
else:
# Robot to the right, overtake to the left
self.autoTargetLane = +Settings.overtakeLaneOffset
self.overtakeRemainingTicks = Settings.overtakeDurationFrames
self.overtakeBrakingTicks = Settings.overtakeBrakingFrames
self.overtaking = True
LogData(LOG_MAJOR, 'Overtaking at lane offset %.2f' % (self.autoTargetLane))
if self.overtaking:
# Count down the remaining overtake distance
self.overtakeRemainingTicks -= 1
self.overtakeBrakingTicks -= 1
if self.overtakeRemainingTicks < 0:
# Overtaking complete, reset the target lane for the next loop
LogData(LOG_MAJOR, '< END OVERRIDE: OVERTAKE >')
self.autoTargetLane = 0.0
self.overtaking = False
elif self.overtakeBrakingTicks >= 0:
# Initial part of the overtake, apply braking
overrideSpeed *= Settings.overtakeBrakingSpeed
print overrideSpeed
self.accumulateDistance = calculateDistance
return overrideSpeed, overrideSteering
def RunLoop(self, (line0, local0, d1, d2)):
with self.lock:
# Select the correct d0 depending on nearest line or track position
full0 = line0 + local0
d0 = full0 # red / green line
d0 = d0 - (Settings.targetTrackPosition + Globals.userTargetLane + self.autoTargetLane)
# Track offset loop (d0)
self.p0 = Settings.Kp0 * d0
self.i0 += Settings.Ki0 * d0
if self.i0 > self.clipMax:
self.i0 = self.clipMax
elif self.i0 < self.clipMin:
self.i0 = self.clipMin
self.d0 = Settings.Kd0 * (d0 - self.lastD0)
self.pid0 = self.p0 + self.i0 + self.d0
if self.pid0 > self.clipMax:
self.pid0 = self.clipMax
elif self.pid0 < self.clipMin:
self.pid0 = self.clipMin
# Track angle loop (d1)
self.p1 = Settings.Kp1 * d1
self.i1 += Settings.Ki1 * d1
if self.i1 > self.clipMax:
self.i1 = self.clipMax
elif self.i1 < self.clipMin:
self.i1 = self.clipMin
self.d1 = Settings.Kd1 * (d1 - self.lastD1)
self.pid1 = self.p1 + self.i1 + self.d1
if self.pid1 > self.clipMax:
self.pid1 = self.clipMax
elif self.pid1 < self.clipMin:
self.pid1 = self.clipMin
# Track curvature loop (d2)
self.p2 = Settings.Kp2 * d2
self.i2 += Settings.Ki2 * d2
if self.i2 > self.clipMax:
self.i2 = self.clipMax
elif self.i2 < self.clipMin:
self.i2 = self.clipMin
self.d2 = Settings.Kd2 * (d2 - self.lastD2)
self.pid2 = self.p2 + self.i2 + self.d2
if self.pid2 > self.clipMax:
self.pid2 = self.clipMax
elif self.pid2 < self.clipMin:
self.pid2 = self.clipMin
# Speed setting
if self.moving:
speed = Globals.userSpeed
else:
speed = 0.0
# Note the average and change in d0 for the last two frames for distance calculations
self.distanceD0 = (self.lastD0 + d0) / 2.0
self.changeD0 = d0 - self.lastD0
# Note the old values
self.lastD0 = d0
self.lastD1 = d1
self.lastD2 = d2
# Set the final drive
steering = self.pid0 + self.pid1 + self.pid2
self.FirFilter(speed, steering)
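# RunLoop is the heart of the controller: three PID terms are computed from the
# lane position error (d0), the track angle (d1) and the track curvature (d2),
# each with its integral and output clipped to +/- Settings.clipI to limit
# wind-up, and their sum becomes the steering demand fed through the FIR filter.
# Speed is simply the user speed while moving, zero otherwise.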
# Image stream processing thread
class StreamProcessor(threading.Thread):
def __init__(self, name, autoRun = True):
super(StreamProcessor, self).__init__()
self.event = threading.Event()
self.terminated = False
self.name = str(name)
self.shownCount = 0
self.lastFrame = None
self.lastLightsFrame = None
self.olderLightsFrame = None
self.burnCount = Settings.lightsBurnFrames
if self.burnCount < 2:
self.burnCount = 2
self.eventWait = (2.0 * Settings.processingThreads) / Settings.frameRate
if autoRun:
LogData(LOG_CRITICAL, 'Processor thread %s started with idle time of %.2fs' % (self.name, self.eventWait))
self.start()
else:
LogData(LOG_CRITICAL, 'Processor thread %s loaded and waiting for instructions' % (self.name))
def run(self):
# This method runs in a separate thread
while not self.terminated:
# Wait for an image to be written to the stream
self.event.wait(self.eventWait)
if self.event.isSet():
if self.terminated:
break
try:
# grab the image and do some processing on it
if Settings.flippedImage and Settings.horizontalFlip:
image = cv2.flip(self.nextFrame, 0)
elif Settings.horizontalFlip:
image = cv2.flip(self.nextFrame, 1)
elif Settings.flippedImage:
image = cv2.flip(self.nextFrame, -1)
else:
image = self.nextFrame
self.ProcessImage(image)
finally:
# Reset the event
self.nextFrame = None
self.event.clear()
# Return ourselves to the pool at the back
with Globals.frameLock:
Globals.processorPool.insert(0, self)
LogData(LOG_CRITICAL, 'Processor thread %s terminated' % (self.name))
# Helper function to show images
def ShowImage(self, name, image):
self.shownCount += 1
name = '%d - %s' % (self.shownCount, name)
if scaleDebugImage != 1.0:
size = (int(image.shape[1] * scaleDebugImage), int(image.shape[0] * scaleDebugImage))
image = cv2.resize(image, size, interpolation = cv2.INTER_CUBIC)
cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE)
cv2.imshow(name, image)
cv2.waitKey(1)
# Helper function for plotting
def DrawCross(self, image, x, y, (r, g, b)):
points = [(x,y), (x-1,y), (x+1,y), (x,y-1), (x,y+1)]
for point in points:
x = point[0]
y = point[1]
if (x >= 0) and (y >= 0) and (x < Settings.imageWidth) and (y < Settings.imageHeight):
if b != None:
image.itemset((y, x, 0), b)
if g != None:
image.itemset((y, x, 1), g)
if r != None:
image.itemset((y, x, 2), r)
# Helper function for plotting
def DrawPoints(self, image, points, xOffset, yOffset, (r, g, b)):
for point in points:
x = point[0] + xOffset
y = point[1] + yOffset
self.DrawCross(image, x, y, (r, g, b))
# Find edges in a boolean image
def SweepLine(self, image, Y):
risingX = []
fallingX = []
line = image[Y, :]
width = len(line)
changed = numpy.where(line[:-1] != line[1:])[0]
current = line.item(0)
for i in changed:
if current:
typeX = fallingX
else:
typeX = risingX
# Filter out changes at the edge of the image
if i < 2:
pass
elif i > (width - 3):
pass
else:
typeX.append(i)
current = not current
return risingX, fallingX
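# SweepLine scans a single row of a boolean mask and returns the X positions of
# rising (False -> True) and falling (True -> False) transitions, found by
# comparing the row against itself shifted by one pixel.  Transitions within
# two pixels of either image edge are discarded as likely crop artefacts.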
# Remove matches from the non-target lists
def EliminateMatches(self, Y, valsTarget, valsA, valsB):
for Xt in valsTarget:
if valsA:
for Xa in valsA:
newDistance = abs(Xt - Xa)
if newDistance <= Settings.maxSepX:
valsA.remove(Xa)
if valsB:
for Xb in valsB:
newDistance = abs(Xt - Xb)
if newDistance <= Settings.maxSepX:
valsB.remove(Xb)
# Find matches
def FindMatches(self, Y, valsTarget, valsA, valsB, valsC, matchNone, matchA, matchB, matchC):
while len(valsTarget) > 0:
Xt = valsTarget.pop()
if valsA:
matchDistance = 999999
matchedX = None
for Xa in valsA:
newDistance = abs(Xt - Xa)
if newDistance < matchDistance:
matchDistance = newDistance
matchedX = Xa
if matchDistance <= Settings.maxSepX:
X = (Xt + matchedX) / 2
matchA.append((X, Y))
valsA.remove(matchedX)
continue
if valsB:
matchDistance = 999999
matchedX = None
for Xb in valsB:
newDistance = abs(Xt - Xb)
if newDistance < matchDistance:
matchDistance = newDistance
matchedX = Xb
if matchDistance <= Settings.maxSepX:
X = (Xt + matchedX) / 2
matchB.append((X, Y))
valsB.remove(matchedX)
continue
if valsC:
matchDistance = 999999
matchedX = None
for Xc in valsC:
newDistance = abs(Xt - Xc)
if newDistance < matchDistance:
matchDistance = newDistance
matchedX = Xc
if matchDistance <= Settings.maxSepX:
X = (Xt + matchedX) / 2
matchC.append((X, Y))
valsC.remove(matchedX)
continue
matchNone.append((Xt, Y))
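# FindMatches pairs each edge in valsTarget with the nearest edge from valsA,
# then valsB, then valsC in turn.  A pair closer than Settings.maxSepX pixels is
# recorded at the midpoint X in the corresponding match list and the partner
# edge is consumed; anything left unmatched goes into matchNone and is later
# counted as an 'unknown' point (for example another robot blocking the view).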
# Image processing function
def ProcessImage(self, image):
# Frame rate counter
with Globals.frameLock:
Globals.lastRawFrame = image
self.frame = Globals.frameCounter
Globals.frameAnnounce += 1
Globals.frameCounter += 1
if Globals.frameAnnounce == Settings.fpsInterval:
frameStamp = time.time()
if showFps:
fps = Settings.fpsInterval / (frameStamp - Globals.lastFrameStamp)
fps = '%.1f FPS' % (fps)
print fps
saveImages = writeImages
showAll = debugImages
saveAll = saveImages and debugImages
saveRaw = writeRawImages
Globals.frameAnnounce = 0
Globals.lastFrameStamp = frameStamp
else:
saveImages = False
showAll = False
saveAll = False
saveRaw = False
if showAll:
self.ShowImage('raw', image)
if saveAll or saveRaw:
cv2.imwrite(filePattern % (self.frame, 'raw'), image)
# See what mode we are in
checkStuck = False
checkForStart = False
if Globals.imageMode == READY_TO_RACE:
# Waiting for commands, process to determine grid position
pass
elif Globals.imageMode == WAIT_FOR_LIGHTS:
# Waiting for the lights sequence
# Grab the lights section
lightsFrame = image[Settings.lightsY1 : Settings.lightsY2, Settings.lightsX1 : Settings.lightsX2]
if showProcessing:
Globals.displayFrame = lightsFrame
# Get the differences to and from the new frame versus the frame two previous
if self.burnCount > 0:
self.burnCount -= 1
diffLights = numpy.zeros_like(lightsFrame)
diff2Lights = numpy.zeros_like(lightsFrame)
else:
diffLights = cv2.subtract(lightsFrame, self.olderLightsFrame)
diff2Lights = cv2.subtract(self.olderLightsFrame, lightsFrame)
# Maintain a history of two previous frames
self.olderLightsFrame = self.lastLightsFrame
self.lastLightsFrame = lightsFrame
# Split out the three channels
diffBlue, diffGreen, diffRed = cv2.split(diffLights)
diff2Blue, diff2Green, diff2Red = cv2.split(diff2Lights)
redLevel = diffRed.mean() * Settings.lightsRedGain
greenLevel = diffGreen.mean()
red2Level = diff2Red.mean() * Settings.lightsRedGain
green2Level = diff2Green.mean()
# Work out on and off
redOnLevel = redLevel - red2Level
greenOnLevel = greenLevel - green2Level
redOffLevel = red2Level - redLevel
greenOffLevel = green2Level - greenLevel
# Check the levels
if (redOnLevel > Settings.lightsChangeThreshold) or (greenOnLevel > Settings.lightsChangeThreshold):
if redOnLevel > greenOnLevel:
lightOn = 'R'
else:
lightOn = 'G'
else:
lightOn = None
if (redOffLevel > Settings.lightsChangeThreshold) or (greenOffLevel > Settings.lightsChangeThreshold):
if redOffLevel > greenOffLevel:
lightOff = 'R'
else:
lightOff = 'G'
else:
lightOff = None
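# Light changes are detected by differencing the lights crop against the frame
# two earlier: a channel that became brighter shows up in diffLights (a light
# turning on) and one that became darker shows up in diff2Lights (a light
# turning off).  The red channel is scaled by lightsRedGain so red and green
# changes can be compared against the same threshold.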
# State machine for lights
if Globals.startLights == READY_OFF:
# Move on if we changed to green
if lightOn == 'G':
Globals.startLights = FIRST_GREEN
LogData(LOG_MAJOR, 'Lights: 1 - Green')
elif Globals.startLights == FIRST_GREEN:
# Move on if we changed to red
if lightOn == 'R': #and lightOff == 'G':
Globals.startLights = SECOND_RED
LogData(LOG_MAJOR, 'Lights: 2 - Red')
elif Globals.startLights == SECOND_RED:
# Move on if we changed to green
if lightOn == 'G': #and lightOff == 'R':
Globals.startLights = THIRD_GREEN_GO
LogData(LOG_MAJOR, 'Lights: 3 - Green')
elif Globals.startLights == THIRD_GREEN_GO:
# Ready, start racing
LogData(LOG_CRITICAL, 'Lights: GO')
self.lastLightsFrame = None
self.olderLightsFrame = None
self.burnCount = Settings.lightsBurnFrames
if self.burnCount < 2:
self.burnCount = 2
if Settings.firstStraightOverride:
SetImageMode(FIRST_STRAIGHT)
else:
SetImageMode(FOLLOW_TRACK)
Globals.controller.lastStartMarker = time.time()
else:
LogData(LOG_CRITICAL, '! BAD LIGHTS STATE !')
Globals.startLights = READY_OFF
return
elif Globals.imageMode == FOLLOW_TRACK:
# Normal driving, proceed to standard code
checkStuck = True
checkForStart = True
elif Globals.imageMode == CRASHED:
# Crash detected, keep processing normally as this is handled in the control loop
if Globals.lastImageMode == FLIPPED:
# Flipped over, invert camera
image = cv2.flip(image, -1)
elif Globals.imageMode == FLIPPED:
# Flipped over, invert camera
checkStuck = True
checkForStart = True
image = cv2.flip(image, -1)
elif Globals.imageMode == WRONG_WAY:
# Driving the wrong way, keep processing normally as this is handled in the control loop
checkStuck = True
if Globals.lastImageMode == FLIPPED:
# Flipped over, invert camera
image = cv2.flip(image, -1)
elif Globals.imageMode == RACE_OVER:
# Done racing, finish the processing loop here
Globals.running = False
return
elif Globals.imageMode == FIRST_STRAIGHT:
# First straight dash, keep processing normally as this is handled in the control loop
# We do not want to check for the start line here, it may confuse things!
pass
else:
# Unexpected mode!!!
LogData(LOG_CRITICAL, '! BAD IMAGE STATE !')
SetImageMode(READY_TO_RACE)
# Quick check for a flipped robot
bAll, gAll, rAll = cv2.split(image)
maxImage = numpy.maximum(numpy.maximum(bAll, gAll), rAll)
topCrop = maxImage[Settings.flipDetectionY1 : Settings.flipDetectionY2, :]
bottomCrop = maxImage[Settings.flipDetectionY3 : Settings.flipDetectionY4, :]
bottomVar = bottomCrop.var()
topVar = topCrop.var()
if topVar > 0.0:
flippedDetectionLevel = bottomVar / topVar
else:
flippedDetectionLevel = 0.0
if flippedDetectionLevel > Settings.flipDetectionThreshold:
# We may have a flipped image
Globals.controller.flippedImageCount += 1
if Globals.controller.flippedImageCount > Settings.flipDetectionFrames:
# We think this is a flipped image!
if Globals.imageMode == FOLLOW_TRACK:
# In normal mode, swap to flipped
LogData(LOG_MAJOR, '< START OVERRIDE: FLIPPED >')
SetImageMode(FLIPPED)
Globals.controller.flippedImageCount = 0
image = cv2.flip(image, -1)
elif Globals.imageMode == FLIPPED:
# In flipped mode, swap back to normal
LogData(LOG_MAJOR, '< END OVERRIDE: FLIPPED >')
SetImageMode(FOLLOW_TRACK)
Globals.controller.flippedImageCount = 0
image = cv2.flip(image, -1)
else:
# Busy doing some other override, let this pass for now
pass
else:
# We think this image is fine, reset the counter
Globals.controller.flippedImageCount = 0
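# Flip detection compares the brightness variance of a band near the bottom of
# the frame with one near the top.  A high bottom/top ratio sustained for more
# than flipDetectionFrames consecutive frames is taken to mean the robot is
# upside down, so the image (and, in the control loop, the motor outputs) are
# inverted until the ratio recovers.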
# Crop the frame
cropped = image[Settings.cropY1 : Settings.cropY2, Settings.cropX1 : Settings.cropX2, :]
# Check if the frame is not changing much (we are stuck)
if checkStuck:
if self.lastFrame is not None:
differences = cv2.absdiff(cropped, self.lastFrame)
differenceLevel = numpy.mean(differences)
if differenceLevel < Settings.stuckIdenticalThreshold:
# Stuck frame
Globals.controller.stuckFrameCount += 1
else:
# Not a stuck frame
Globals.controller.stuckFrameCount = 0
else:
# Checking off, reset counter
Globals.controller.stuckFrameCount = 0
self.lastFrame = cropped
# Auto-brighten
maximum = numpy.max(cropped)
adjustment = 255.0 / maximum
if adjustment > Settings.autoGainMax:
adjustment = Settings.autoGainMax
corrected = cropped * adjustment
corrected = numpy.clip(corrected, 0, 255)
corrected = numpy.array(corrected, dtype = numpy.uint8)
if showAll:
self.ShowImage('corrected', corrected)
if saveAll:
cv2.imwrite(filePattern % (self.frame, 'corrected'), corrected)
# Find the dark portions for exclusion
black = cv2.inRange(corrected, numpy.array((0, 0, 0)),
numpy.array((Settings.blackMaxB, Settings.blackMaxG, Settings.blackMaxR)))
# Erode the black detection to remove noise
if Settings.erodeChannels > 1:
erodeKernel = numpy.ones((Settings.erodeChannels, Settings.erodeChannels), numpy.uint8)
black = cv2.erode(black, erodeKernel)
if showAll:
self.ShowImage('black', black)
if saveAll:
cv2.imwrite(filePattern % (self.frame, 'black'), black)
# Grab the section of the image used for the start line
lineR = image[Settings.startY, Settings.startX1 : Settings.startX2, 2]
lineG = image[Settings.startY, Settings.startX1 : Settings.startX2, 1]
lineB = image[Settings.startY, Settings.startX1 : Settings.startX2, 0]
# Check each pixel for a start marker colour match
matchR = lineR > Settings.startMinR
matchG = lineG < Settings.startMaxG
matchB = lineB < Settings.startMaxB
match = numpy.logical_and(numpy.logical_and(matchR, matchG), matchB)
matchRatio = numpy.count_nonzero(match) / float(len(match))
if matchRatio >= Settings.startRatioMin:
startDetected = True
else:
startDetected = False
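# The start line test samples a single row across the start marker region and
# declares the line visible when at least startRatioMin of the pixels are
# strongly red (red above startMinR with green and blue below their limits).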
if checkForStart:
# Start line testing logic
if Globals.startWaitCount > 0:
# Countdown to announcement
Globals.startWaitCount -= 1
if Globals.startWaitCount <= 0:
# Crossed line
LogData(LOG_MAJOR, '--- START-LINE CROSSED ---')
Globals.lapTravelled = 0.0
Globals.lapCount += 1
elif Globals.seenStart:
# We have seen the start line, wait for it to move out of sight
if not startDetected:
LogData(LOG_MAJOR, '--- START-LINE OUT OF SIGHT ---')
Globals.seenStart = False
else:
# We are still waiting to see the start line
if startDetected:
detectionTime = time.time()
if (detectionTime - Globals.controller.lastStartMarker) < Settings.startRedetectionSeconds:
LogData(LOG_MINOR, '--- START-LINE SEEN BUT TOO SOON ---')
else:
Globals.controller.lastStartMarker = detectionTime
LogData(LOG_MAJOR, '--- START-LINE DETECTED ---')
Globals.seenStart = True
if Settings.startCrossedFrames < 1:
# No need to wait, we have already crossed the line
LogData(LOG_MAJOR, '--- START-LINE CROSSED ---')
Globals.lapTravelled = 0.0
Globals.lapCount += 1
else:
# Wait for a countdown before we announce crossing
Globals.startWaitCount = Settings.startCrossedFrames
# Convert to red, green, and blue sections
blue, green, red = cv2.split(cropped)
## Work out some automatic levels and gains based on the whole image
dropR = rAll.min()
dropG = gAll.min()
dropB = bAll.min()
autoGainR = Settings.targetLevel / rAll.max()
autoGainG = Settings.targetLevel / gAll.max()
autoGainB = Settings.targetLevel / bAll.max()
# Apply gains to make the channels roughly equal
red = (red - dropR) * Settings.redGain * autoGainR
green = (green - dropG) * Settings.greenGain * autoGainG
blue = (blue - dropB) * Settings.blueGain * autoGainB
# Clamp the values to the standard range
red = numpy.clip(red, 0, 255)
green = numpy.clip(green, 0, 255)
blue = numpy.clip(blue, 0, 255)
red = numpy.array(red, dtype = numpy.uint8)
green = numpy.array(green, dtype = numpy.uint8)
blue = numpy.array(blue, dtype = numpy.uint8)
# Remove any section where a different channel is stronger
maxImage = numpy.maximum(numpy.maximum(blue, green), red)
red [red < maxImage] = 0
green[green < maxImage] = 0
blue [blue < maxImage] = 0
exclude = black > 0
red [exclude] = 0
green[exclude] = 0
blue [exclude] = 0
# Erode each channel to remove noise
if Settings.erodeChannels > 1:
red = cv2.erode(red, erodeKernel)
green = cv2.erode(green, erodeKernel)
blue = cv2.erode(blue, erodeKernel)
# Extract the closest track colours for hunting override
Globals.controller.huntColours = (
red[Settings.stuckDetectColourY, Settings.stuckDetectColourX],
green[Settings.stuckDetectColourY, Settings.stuckDetectColourX],
blue[Settings.stuckDetectColourY, Settings.stuckDetectColourX]
)
# Display colours
if showAll or saveAll or predatorView:
adjusted = cv2.merge([blue, green, red])
walls = cv2.merge([black, black, black])
adjusted = cv2.addWeighted(adjusted, 1.0, walls, 1.0, 0)
if showAll:
self.ShowImage('r', red)
self.ShowImage('g', green)
self.ShowImage('b', blue)
if showAll:
self.ShowImage('adjusted', adjusted)
if predatorView:
Globals.displayPredator = adjusted
if saveAll:
cv2.imwrite(filePattern % (self.frame, 'adjusted'), adjusted)
# Find image contours and plot
if showAll or saveAll:
displayImage = image.copy()
rContours, hierarchy = cv2.findContours(red, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
gContours, hierarchy = cv2.findContours(green, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
bContours, hierarchy = cv2.findContours(blue, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
kContours, hierarchy = cv2.findContours(black, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
cv2.drawContours(displayImage, rContours, -1, (0,0,255), 1, offset = (Settings.cropX1, Settings.cropY1))
cv2.drawContours(displayImage, gContours, -1, (0,255,0), 1, offset = (Settings.cropX1, Settings.cropY1))
cv2.drawContours(displayImage, bContours, -1, (255,0,0), 1, offset = (Settings.cropX1, Settings.cropY1))
cv2.drawContours(displayImage, kContours, -1, (0,0,0), 1, offset = (Settings.cropX1, Settings.cropY1))
if showAll:
self.ShowImage('contours', displayImage)
if saveAll:
cv2.imwrite(filePattern % (self.frame, 'contours'), displayImage)
# Make thresholded arrays into boolean arrays
red = red > 0
green = green > 0
blue = blue > 0
black = black > 0
# Find the edges
krLine = []
rbLine = []
brLine = []
rgLine = []
gbLine = []
bgLine = []
gkLine = []
others = []
wrongWay = []
for Y in Settings.croppedYScan:
rRisingX, rFallingX = self.SweepLine(red, Y)
gRisingX, gFallingX = self.SweepLine(green, Y)
bRisingX, bFallingX = self.SweepLine(blue, Y)
kRisingX, kFallingX = self.SweepLine(black, Y)
# Match based on pairings
self.FindMatches(Y, rRisingX, bFallingX, kFallingX, gFallingX, others, brLine, krLine, wrongWay)
self.FindMatches(Y, rFallingX, bRisingX, gRisingX, None, others, rbLine, rgLine, None)
self.FindMatches(Y, gFallingX, bRisingX, kRisingX, None, others, gbLine, gkLine, None)
self.FindMatches(Y, bFallingX, gRisingX, None, None, others, bgLine, None, None)
for values in [rRisingX, rFallingX,
gRisingX, gFallingX,
bRisingX, bFallingX,
kRisingX, kFallingX]:
for X in values:
others.append((X, Y))
lines = [krLine,
rbLine,
brLine,
rgLine,
gbLine,
bgLine,
gkLine]
Globals.controller.wrongWayCount = len(wrongWay)
if len(others) > 0:
Globals.controller.unknownPointAverage = numpy.array(others)[:,0].mean() / (Settings.cropX2 - Settings.cropX1)
else:
Globals.controller.unknownPointAverage = 0.5
Globals.controller.unknownPointCount = len(others)
# Plot the lines
buildDisplay = showProcessing or saveImages
if buildDisplay:
displayImage = image.copy()
self.DrawPoints(displayImage, wrongWay, Settings.cropX1, Settings.cropY1, (255, 255, 255))
for i in range(len(lines)):
self.DrawPoints(displayImage, lines[i], Settings.cropX1, Settings.cropY1, lineIndexToColour[i])
if showUnknownPoints:
self.DrawPoints(displayImage, others, Settings.cropX1, Settings.cropY1, (0, 0, 0))
if saveImages:
cv2.imwrite(filePattern % (self.frame, 'final'), displayImage)
self.SetSpeedFromLines(lines, True, displayImage)
else:
self.SetSpeedFromLines(lines, False, None)
# Set the drive speed and steering from the detected lines
def SetSpeedFromLines(self, lines, buildDisplay, displayImage):
Globals.lastLines = lines
# Defaults
hasValues = False
# Find the best line
count = 0