"""
FUMEBOT GUI APPLICATION
This is a GUI application to connect to and communicate with FumeBot, sending and receiving data
over TCP/IP sockets to control the robot. The application uses several threads for socket
communication.
Written by : Ajith Thomas
Date : 5-4-2018
"""
import os
import sys
import cv2
import time
import datetime
import ctypes
import numpy as np
from PyQt4 import QtCore, QtGui
from configparser import ConfigParser
from FumeBotGUI.FumeBot_UI import Ui_mainWindow
from FumeBotGUI.FumeBotDataSaver import FumeBotVideoSaver, FumeBotTrainingDataSaver
from FumeBotGUI.FumeBotDNN import FumeBotDNN
from FumeBotGUI.FumeBotSockComm import SockComm
class MainWindow(QtGui.QMainWindow):
# Configuration file
config_path="config.ini"
# Application version
app_version="v0.1"
app_name="FumeBot"
app_id=app_name+"."+app_version
# Type of messages
app_msg="Interface"
bot_msg="Robot"
# Socket connection variables
socket_data_connected=False # For data stream
socket_img_connected=False # For image stream
# Controller status
micro_controller_rdy=False
prev_msg_disp_time=0 # Stores the last time the 'data port not connected' message was displayed
msg_delay=2 # Minimum delay between those messages (s)
# Camera display and processing
frame_black=np.zeros((720,1280,3),dtype=np.uint8)
frame_bgr=np.zeros((720,1280,3),dtype=np.uint8)
frame_train_bgr=np.zeros((720, 1280, 3), dtype=np.uint8)
frame_dnn_bgr=np.zeros((60,80,3), dtype=np.uint8)
frame_thermal=np.zeros((720,1280,3),dtype=np.uint8)
frame_thermal_copy=np.zeros((120,160,3),dtype=np.uint8)
final_frame=np.zeros((720, 1280, 3), dtype=np.uint8)
display_fps=30 # FPS for the video feed display (This affects only the UI display and not the actual camera FPS)
cam_actual_fps=0
prev_cam_rcv_time=time.time()
therm_actual_fps=0
prev_therm_rcv_time=time.time()
normal_cam_enable=True
thermal_cam_enable=False
# Variables for blending
norm_cam_weight=50.0
therm_cam_weight=50.0
# Variables for adjusting the thermal view
therm_scale_val=1
therm_pos_horz=0
therm_pos_vert=0
# Gas sensor readings
eCO2=0
TVOC=0
# Particle sensor readings
red=0
green=0
ir=0
# Environmental sensor readings
pressure=0
temperature=0
humidity=0
# Gas sensor thresholds
eCO2_thresh=0
TVOC_thresh=0
gas_threshold_reached=False # To say whether the threshold has been reached or not
# Particle sensor threshold
Red_thresh=0
Green_thresh=0
IR_thresh=0
particle_threshold_reached=False # To say whether the threshold has been reached or not
flash_warning=False # Boolean to say when to flash the warning
flash_non_critical=False # Boolean to say when to flash the non critical warnings
flash_time_w=250 # Flash timer interval for warnings
flash_timer_nc=500 # Flash timer interval for non critical warnings
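# These intervals drive warning_flasher_timer and non_critical_flasher_timer set up in connection_to_signals()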
# Max values for gas sensor
cur_max_eCO2=0
cur_max_TVOC=0
# Max values for particle sensor
cur_max_Red=0
cur_max_Green=0
cur_max_IR=0
# Neural net
nn_active=False
# SMS alert variable
sms_number="09876543210"
sms_enabled=0
# For the WASD key control
button_repeat_delay=25 # Movement button delay
key_pressed_dict={
'W' : False,
'A' : False,
'S' : False,
'D' : False,
'SHIFT' : False,
'CTRL' : False
}
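# Updated on key press/release events and polled every button_repeat_delay ms by handle_key_presses() through button_handle_timer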
default_key_pressed_dict=key_pressed_dict.copy() # Make a shallow copy
move_button_press_order_dict = { # 10 Control options
'FORWARD': 0,
'BACKWARD': 1,
'LEFT': 2,
'RIGHT': 3,
'LEFT_TURN_FORWARD': 4,
'RIGHT_TURN_FORWARD': 5,
'LEFT_TURN_BACKWARD': 6,
'RIGHT_TURN_BACKWARD': 7,
'ACCELERATION': 8, # Should not be used in training
'BRAKE': 9, # Should not be used in training
}
# This is a dictionary for the DNN button press control
dnn_move_button_press_order_dict = { # 10 Control options
'FORWARD': 1,
'BACKWARD': 0,
'LEFT': 0,
'RIGHT': 0,
'LEFT_TURN_FORWARD': 0,
'RIGHT_TURN_FORWARD': 2,
'LEFT_TURN_BACKWARD': 0,
'RIGHT_TURN_BACKWARD': 0,
'ACCELERATION': 0, # Should not be used in training
'BRAKE': 0, # Should not be used in training
}
default_move_list=[] # This list is built dynamically from the keys that are allowed to be pressed
move_button_press_list=[] # The list of buttons currently pressed
# The dictionary below can also be used to stop the actual command from being sent to the robot
# This disabling method allows finer control: the keys fall into three sections (basic, combinational, additional) that can be disabled as needed
disabled_button_press_dict={ # These buttons/button combinations won't be saved in the training data
'FORWARD': False, # Basic controls
'BACKWARD': False,
'LEFT': False,
'RIGHT': False,
'LEFT_TURN_FORWARD': False, # Combinational controls
'RIGHT_TURN_FORWARD': False,
'LEFT_TURN_BACKWARD': False,
'RIGHT_TURN_BACKWARD': False,
'ACCELERATION': False, # Additional controls
'BRAKE': False, # False means not disabled and True means disabled
}
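# A True entry keeps that movement out of the saved training data (and its command can also be withheld from the robot, see disable_send_command below)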
# This dictionary is used for the button press control of the DNN
dnn_disabled_button_press_dict = { # These buttons/button combinations won't be saved in the training data
'FORWARD': False, # Basic controls
'BACKWARD': True,
'LEFT': True,
'RIGHT': True,
'LEFT_TURN_FORWARD': False, # Combinational controls
'RIGHT_TURN_FORWARD': False,
'LEFT_TURN_BACKWARD': True,
'RIGHT_TURN_BACKWARD': True,
'ACCELERATION': True, # Additional controls
'BRAKE': True, # False means not disabled and True means disabled
}
# Copies for the list and dictionaries that are updated according to user changes (Used in UI update)
updated_key_order_dict=move_button_press_order_dict.copy() # Shallow copy is made
updated_disabled_key_dict=disabled_button_press_dict.copy() # Shallow copy is made
# The total number of allowed key combinations is calculated using the disabled button dictionary (updated)
keys_comb_len=len(disabled_button_press_dict) # Number of different key presses
# For any movement disabled in the dictionary above, the corresponding command is not sent to the robot
disable_send_command=False # False means not disabled and True means disabled
disabled_move_key_pressed=False # Variable to say whether a disabled key was pressed or not (True means pressed)
# Variables for the training data generation
train_file_name='training_dataset.npy'
train_file_path='~\\Documents\\FumeBot\\Training'
train_path_exists=False
train_frame_width=80
train_frame_height=60
training_data=[] # This list contains the image and control input (Key presses)
save_per_every=250 # After 250 training samples a save is done
enable_training_data_recording=False
training_data_recording_paused=False
save_done=False
training_frame_count=0
# Variables for the training numpy meta file
parent_file_name=train_file_name
meta_key_order={}
meta_disabled_key={}
ko_comp_failed = False
dk_comp_failed = False
# For the mouse button control
mouse_pressed_dict={
'LEFT_MB':False,
'MID_MB':False,
'RIGHT_MB':False,
}
# Used for nulling
start_x=0
start_y=0
# These are the component x and y values for the mouse position after nulling
mouse_x=0
mouse_y=0
pan_scaler=4.0 # This is the amount of mouse movement needed (the higher the scaler, the more mouse movement is needed)
tilt_scaler=4.0
max_pan_tilt_scaler=10.0
# Servo motor should be checked to see if the limits can be achieved
pan_tilt_limit_dict={
'TILT_UP':180,
'TILT_DOWN':0,
'PAN_LEFT':0,
'PAN_RIGHT':180,
}
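# Degrees per pixel of mouse movement: the 180-degree range is divided by the configured servo span and by the sensitivity scaler (a larger scaler gives slower pan/tilt)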
dpp_tilt=(180.0/float(pan_tilt_limit_dict['TILT_UP']-pan_tilt_limit_dict['TILT_DOWN']))/tilt_scaler
dpp_pan=(180.0/float(pan_tilt_limit_dict['PAN_RIGHT']-pan_tilt_limit_dict['PAN_LEFT']))/pan_scaler
pan_disp=0 # This can be shown on the display
tilt_disp=0 # This can be shown on the display
pan_angle=0 # The final pan angle
tilt_angle=0 # The final tilt angle
pan_default=90
tilt_default=90
enable_mouse_pan_tilt=True # False means disabled
# Commands for movement
movement_cmd_dict={
'FRWD' : 109,
'BWRD' : 113,
'LFT' : 127,
'RGT' : 179,
'L_TURN_F' : 181,
'R_TURN_F' : 191,
'L_TURN_B' : 193,
'R_TURN_B' : 197
}
brake=0 # Brake state (0-Not applied, 1-Applied)
accl=0 # Increase speed state (0-Not used, 1-Used)
# Contains the headers for socket receives (this dictionary matches the send headers on the other side)
socket_receive_header_dict = {
'GAS': 'G',
'ENVIRONMENTAL': 'E',
'PARTICLE': 'P',
'WIFI': 'W',
'SOCK_SET_ACK': 'AK', # This is used as an acknowledgement saying settings were applied successfully
'GAS_THR_RET': 'GTR',
'PAR_THR_RET': 'PTR',
'SMS_RET': 'SR',
'CAM_RET': 'CMR',
'MCU_STAT': 'MS',
}
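# Incoming data messages are identified by these headers and handled in get_socket_stream_data()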
ack_code_dict = {
'GAS_THR': 1,
'PAR_THR': 2,
'SMS_SET': 3,
'CON_SET': 4,
'VID_SET': 5,
'RST_ALM': 6,
}
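# Codes expected alongside the SOCK_SET_ACK header to identify which group of settings was applied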
# Contains the headers for socket sending (this dictionary matches the receive headers on the other side)
socket_send_header_dict={
'MOVE': 'MV', # For movements of the robot
'CAM_PT': 'CPT', # Camera pan and tilt
'CAM_SET': 'CM',
'COM_SET': 'CO',
'GAS_THR': 'GT',
'PAR_THR': 'PT',
'SMS': 'SM',
'REBOOT_RPI': 'RRP',
'RESET_UC': 'RUC',
'PWR_OFF_RPI': 'PWR',
'RESET_ALARM': 'RA', # Reset the alarm state in the microcontroller
'RETRIEVE': 'RET', # This is used to retrieve settings from the RPi side (the header of the setting to retrieve is sent as the parameter)
}
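# Example: requesting the camera settings sends the 'RET' header with 'CM' as its parameter (see retrieve_configurations())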
# Contains the settings that got updated after retrieving the information from the RPi side
updated_settings_dict={
'CAM_SET': False,
'GAS_THR': False,
'PAR_THR': False,
'SMS': False,
}
# Wifi info
wifi_link_quality=0
wifi_signal_level=0
# Video HUD
enable_HUD=1 # 1 means enabled, 0 means disabled
# Recording video
video_file_name='Video.avi'
video_file_path='~\\Documents\\FumeBot\\Video'
video_path_exists=False
rec_width=1280
rec_height=720
rec_res_index=1
rec_fps=20
enable_video_recording=False
video_recording_paused=False
use_cap_resolution=0 # Use the video feed capture resolution
use_cap_fps=0 # Use the video feed capture FPS
def __init__(self,parent=None):
super(MainWindow,self).__init__(parent)
self.config=ConfigParser()
try: # Try to read the configuration file
self.config.read_file(open(self.config_path))
except FileNotFoundError: # Make a new configuration file
self.make_config_file()
self.config.read(self.config_path)
self.host='192.168.137.1' # Network IP
self.port_img=8089 # Port for receiving image frames from
self.port_data=8090 # Port for receiving data
# This shows the icon of the app instead of the python program icon (Only needed for Windows)
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(self.app_id) # Will not work in Linux
# Objects of the UI
self.ui=Ui_mainWindow()
self.ui.setupUi(self)
# TCP socket stream image
self.soc_img=SockComm(self.host, self.port_img, SockComm.IMG_TYPE)
# TCP socket stream data
self.soc_data=SockComm(self.host, self.port_data, SockComm.DATA_TYPE)
self.display_info(self.app_msg,self.app_name+" "+self.app_version) # Display the app name and version
self.dnn=None
self.button_handle_timer=None
self.display_update_timer=None
self.warning_flasher_timer=None
self.non_critical_flasher_timer=None
self.connection_to_signals()
self.attributes_of_ui()
self.update_defaults_from_config() # Update the UI from the configuration file
# Video saver for the bot
self.vid_saver = FumeBotVideoSaver(name=self.video_file_name, path=self.video_file_path,
width=self.rec_width, height=self.rec_height, fps=self.rec_fps)
# Training data saver for the bot
self.train_data_saver = FumeBotTrainingDataSaver(name=self.train_file_name, path=self.train_file_path,
saves_ps=self.save_per_every)
self.check_file_path_statuses() # Check the video and training data save path
def connection_to_signals(self): # The signal from the UI and other signals are connected here
# For the mouse movement over the feed display widget
self.ui.video_frame.setMouseTracking(True)
self.ui.video_frame.installEventFilter(self)
# Signal for connecting button
self.ui.connectButton.clicked.connect(self.establish_connection)
# Signal for the new frame received from the MJPEG video stream
self.soc_img.frameReceived.connect(self.get_socket_stream_frames)
self.soc_img.capStatus.connect(self.capture_status_action)
# Signals for the new data received
self.soc_data.dataReceived.connect(self.get_socket_stream_data)
self.soc_data.sockConnected.connect(self.data_status_action)
# This is the timer that calls the handle button press function when WASD keys are pressed
self.button_handle_timer = QtCore.QTimer()
self.button_handle_timer.timeout.connect(self.handle_key_presses)
self.button_handle_timer.setInterval(self.button_repeat_delay) # Set the delay for the repeat
# This is the timer that is used to refresh the display panel
self.display_update_timer=QtCore.QTimer()
self.display_update_timer.timeout.connect(self.update_video_feed_display)
self.display_update_timer.setInterval(self.calc_interval_from_fps())
self.display_update_timer.start()
# Alarm reset button
self.ui.alarmResetButton.clicked.connect(self.reset_alarm)
# Set all thresholds button
self.ui.setAllThreshButton.clicked.connect(self.set_all_thresholds)
# Get all configuration button
self.ui.retreiveAllConfigButton.clicked.connect(self.retrieve_configurations)
# Video recording start and stop buttons
self.ui.startVideoRecButton.clicked.connect(self.start_vid_recording_button_clicked)
self.ui.stopVideoRecButton.clicked.connect(self.stop_vid_recording_button_clicked)
# Training data recording start and stop buttons
self.ui.startTrainRecButton.clicked.connect(self.start_training_data_rec_button_clicked)
self.ui.stopTrainRecButton.clicked.connect(self.stop_training_data_rec_button_clicked)
# Connections for signals from the set and reset button from the connections toolbox
self.ui.connectSaveButton.clicked.connect(self.connection_save_clicked)
self.ui.connectResetButton.clicked.connect(self.connection_reset_clicked)
# Connection for signals from video feed settings toolbox
self.ui.videoSetButton.clicked.connect(self.video_feed_config_set_clicked)
self.ui.videoResetButton.clicked.connect(self.video_feed_config_reset_clicked)
self.ui.enableThermalCheckBox.stateChanged.connect(self.enable_thermal_video_checkbox_changed)
# Connections for the video display settings toolbox
self.ui.feedTypeComboBox.currentIndexChanged.connect(self.display_mode)
self.ui.blendSlider.sliderMoved.connect(self.blend_slider)
self.ui.thermBlendScalerSlider.valueChanged.connect(self.thermal_image_scale_slider)
self.ui.thermPosHorzSlider.valueChanged.connect(self.thermal_image_pos_horz_slider)
self.ui.thermPosVertSlider.valueChanged.connect(self.thermal_image_pos_vert_slider)
self.ui.feedSetButton.clicked.connect(self.video_disp_set_clicked)
self.ui.feedResetButton.clicked.connect(self.video_disp_reset_clicked)
self.ui.enableHUDCheckBox.stateChanged.connect(self.hud_checkbox_changed)
# Connections for the video recording settings toolbox
self.ui.videoRecSetButton.clicked.connect(self.video_recording_config_set_clicked)
self.ui.videoRecResetButton.clicked.connect(self.video_recording_config_reset_clicked)
self.ui.VideoFileOpenButton.clicked.connect(self.open_video_recording_file_location)
self.ui.videoPathBrowseButton.clicked.connect(self.browse_for_video_recording_file_path)
self.ui.useCaptureResCheckBox.stateChanged.connect(self.disable_rec_vid_res_fps)
self.ui.useCaptureFPSCheckBox.stateChanged.connect(self.disable_rec_vid_res_fps)
# Connections for the training data recording settings toolbox
self.ui.trainingSetButton.clicked.connect(self.training_recording_config_set_clicked)
self.ui.trainingResetButton.clicked.connect(self.training_recording_config_reset_clicked)
self.ui.trainingFileOpenButton.clicked.connect(self.open_training_data_file_location_and_select)
self.ui.trainingFilePathBrowseButton.clicked.connect(self.browse_for_training_recording_file_path)
# Connection for the control key disabling and order changing section
self.ui.disableKeysSetButton.clicked.connect(self.disabled_key_config_set_clicked)
self.ui.disableKeysResetButton.clicked.connect(self.disabled_key_config_reset_clicked)
self.ui.forwardCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.backwardCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.leftCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.rightCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.leftTurnForwardCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.rightTurnForwardCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.leftTurnBackwardCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.rightTurnBackwardCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.accelerationCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
self.ui.brakeCheckBox.stateChanged.connect(lambda : self.disable_key_order_spin_box(redistribute=True))
# Connections for the pan and tilt settings toolbox
self.ui.panTiltSetButton.clicked.connect(self.pan_tilt_config_set_clicked)
self.ui.panTiltResetButton.clicked.connect(self.pan_tilt_config_reset_clicked)
self.ui.panSensivitySlider.valueChanged.connect(self.pan_sensitivity_slider_changed)
self.ui.panSensitivitySpinBox.valueChanged.connect(self.pan_sensitivity_spinbox_changed)
self.ui.tiltSensivitySlider.valueChanged.connect(self.tilt_sensitivity_slider_changed)
self.ui.tiltSensivitySpinBox.valueChanged.connect(self.tilt_sensitivity_spinbox_changed)
self.ui.panTiltEnableCheckBox.stateChanged.connect(self.enable_pan_tilt_checkbox_changed)
# Connection for the gas sensor alarm settings
self.ui.gasThreshSetButton.clicked.connect(self.gas_threshold_set_clicked)
self.ui.gasThreshResetButton.clicked.connect(self.gas_threshold_reset_clicked)
# Connection for the particles sensor alarm settings
self.ui.partThreshSetButton.clicked.connect(self.particle_threshold_set_clicked)
self.ui.partThreshResetButton.clicked.connect(self.particle_threshold_reset_clicked)
# Connection for the SMS alert settings
self.ui.smsAlertSetButton.clicked.connect(self.sms_alert_set_clicked)
self.ui.smsAlertResetButton.clicked.connect(self.sms_alert_reset_clicked)
self.ui.smsAlertEnableCheckBox.stateChanged.connect(self.enable_sms_alert_checkbox_changed)
# Connection for the main menu on the menu bar
self.ui.menuMenu.triggered[QtGui.QAction].connect(self.process_menu_trigger)
# Connection for the help menu
self.ui.menuHelp.triggered[QtGui.QAction].connect(self.process_help_trigger)
# Connection for warning flasher timer signal
self.warning_flasher_timer = QtCore.QTimer()
self.warning_flasher_timer.timeout.connect(self.hud_warning_flasher_set)
self.warning_flasher_timer.setInterval(self.flash_time_w)
# Connection to non critical information flasher timer signal
self.non_critical_flasher_timer=QtCore.QTimer()
self.non_critical_flasher_timer.timeout.connect(self.hud_non_critical_flasher_set)
self.non_critical_flasher_timer.setInterval(self.flash_timer_nc)
# Connection to initializing and activating the DNN
self.ui.initDNNButton.clicked.connect(self.init_neural_net)
self.ui.controlDNNButton.clicked.connect(self.neural_net_activate_deactivate)
def attributes_of_ui(self): # Properties of the GUI
# For info display text edit
self.ui.infoDisplay.setReadOnly(True) # The text in the info is read only
# For the connection setting toolbox
self.ui.dataPortSpinBox.setMinimum(0)
self.ui.dataPortSpinBox.setMaximum(65535)
self.ui.videoPortSpinBox.setMinimum(0)
self.ui.videoPortSpinBox.setMaximum(65535)
# For video feed settings
self.ui.frameRateSpinBox.setMinimum(5)
self.ui.frameRateSpinBox.setMaximum(60)
# For video recording settings
self.ui.videoRecordingFPSSpinBox.setMinimum(5)
self.ui.videoRecordingFPSSpinBox.setMaximum(120)
# For the Training data recording
self.ui.autoSaveSampleSpinBox.setMinimum(10)
self.ui.autoSaveSampleSpinBox.setMaximum(10000)
self.ui.imgWidthSpinBox.setMinimum(5)
self.ui.imgWidthSpinBox.setMaximum(1000)
self.ui.imgHeightSpinBox.setMinimum(5)
self.ui.imgHeightSpinBox.setMaximum(1000)
# For the key order spin boxes
self.ui.forwardOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.backwardOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.leftTurnOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.rightTurnOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.l_turn_fOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.r_turn_fOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.l_turn_bOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.r_turn_bOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.acclOrderSpinBox.setRange(0,self.keys_comb_len)
self.ui.brakeOrderSpinBox.setRange(0,self.keys_comb_len)
# For camera pan and tilt settings
self.ui.panLeftSpinBox.setRange(0,180)
self.ui.panRightSpinBox.setRange(0,180)
self.ui.tiltDownSpinBox.setRange(0,180)
self.ui.tiltUpSpinBox.setRange(0,180)
self.ui.panSensivitySlider.setRange(1,self.max_pan_tilt_scaler)
self.ui.panSensivitySlider.setTickInterval(1)
self.ui.tiltSensivitySlider.setRange(1,self.max_pan_tilt_scaler)
self.ui.tiltSensivitySlider.setTickInterval(1)
self.ui.panSensitivitySpinBox.setRange(1, self.max_pan_tilt_scaler)
self.ui.tiltSensivitySpinBox.setRange(1,self.max_pan_tilt_scaler)
# For the video display settings
self.ui.blendSlider.setMinimum(0)
self.ui.blendSlider.setMaximum(100)
self.ui.blendSlider.setValue(50)
self.ui.thermBlendScalerSlider.setMinimum(1)
self.ui.thermBlendScalerSlider.setMaximum(125)
self.ui.thermBlendScalerSlider.setValue(1)
self.ui.thermPosHorzSlider.setMinimum(-500)
self.ui.thermPosHorzSlider.setMaximum(500)
self.ui.thermPosHorzSlider.setTickInterval(40)
self.ui.thermPosHorzSlider.setValue(0)
self.ui.thermPosVertSlider.setMinimum(-500)
self.ui.thermPosVertSlider.setMaximum(500)
self.ui.thermPosVertSlider.setTickInterval(40)
self.ui.thermPosVertSlider.setValue(0)
# For sensor alarm settings toolbox
self.ui.eCO2ThresholdSpinBox.setMinimum(0)
self.ui.eCO2ThresholdSpinBox.setMaximum(1000000)
self.ui.tvocThresholdSpinBox.setMinimum(0)
self.ui.tvocThresholdSpinBox.setMaximum(1000000000)
self.ui.redThresholdSpinBox.setMinimum(0)
self.ui.redThresholdSpinBox.setMaximum(1000000)
self.ui.greenThresholdSpinBox.setMinimum(0)
self.ui.greenThresholdSpinBox.setMaximum(1000000)
self.ui.irThresholdSpinBox.setMinimum(0)
self.ui.irThresholdSpinBox.setMaximum(1000000)
# For the SMS alert settings
self.ui.smsNumberLineEdit.setMaxLength(14) # A maximum of 14 characters is allowed
def display_info(self,msg_id,disp_string): # Function to print information to the text display
msg_time=datetime.datetime.now().strftime('[%H:%M:%S]')
final_string="["+msg_id+"] "+msg_time+": "+disp_string
self.ui.infoDisplay.append(final_string)
# Socket connection
def establish_connection(self): # Function to establish the connection (Called when connect is clicked)
self.update_connection_info_bar() # update the display bar
if self.ui.connectButton.text() == "Connect": # User trying to connect
self.reinitialize_sockets_thread() # Reinitialize the connection with the host, port and type
self.display_info(self.app_msg,"Trying to connect to specified ports...")
self.display_info(self.app_msg,"Host IP: "+str(self.host))
self.display_info(self.app_msg,"Data port: "+str(self.port_data))
self.display_info(self.app_msg,"Video port: "+str(self.port_img))
self.ui.connectButton.setText("Connecting...") # Set the button text to show that a connection attempt is in progress
self.soc_data.start_sock_read_thread() # Socket for sending data is started
self.soc_img.start_sock_read_thread() # Socket for video capture is started
self.ui.dataSocketStatusLabel.setText("Disconnected")
self.ui.capSocketStatusLabel.setText("Disconnected")
elif self.ui.connectButton.text() == "Connecting...": # Used to cancel the connection that is in progress
self.display_info(self.app_msg,"Connection attempt was cancelled by the user")
self.ui.dataSocketStatusLabel.setText("Disconnected")
self.ui.capSocketStatusLabel.setText("Disconnected")
self.close_socket_routine() # Function to close the sockets (Button text changed in function)
elif self.ui.connectButton.text() == "Disconnect": # To disconnect the above procedure is used again
self.display_info(self.app_msg,"Disconnecting from the robot")
# Since we are closing the sockets manually, we set the socket connection status as false
self.socket_data_connected=False
self.socket_img_connected=False
self.ui.dataSocketStatusLabel.setText("Disconnected")
self.ui.capSocketStatusLabel.setText("Disconnected")
self.close_socket_routine() # Close the socket
def close_socket_routine(self): # Function to close the socket communication and reinitialize
# Shutdown the sockets first to let the client know we are disconnecting
self.soc_data.socket_shutdown()
self.soc_img.socket_shutdown()
# Close the threads if they are running
self.soc_data.stop_sock_read_thread()
self.soc_img.stop_sock_read_thread()
# Since we closed the sockets in the above section we need to create new sockets
self.reinitialize_sockets_thread()
# After everything is done set the text to connect, to allow the user to connect again
self.ui.connectButton.setText("Connect")
self.display_black_screen()
def reinitialize_sockets_thread(self): # Function to reinitialize the socket and thread to start a new one
# Reinitialize the class once again
self.soc_data.__init__(self.host, self.port_data, SockComm.DATA_TYPE)
self.soc_img.__init__(self.host, self.port_img, SockComm.IMG_TYPE)
# When we reinitialize the class the signal connections are lost and therefore need to be reconnected
# Signal for the new frame received from the MJPEG video stream
self.soc_img.frameReceived.connect(self.get_socket_stream_frames)
self.soc_img.capStatus.connect(self.capture_status_action)
# Signals for the new data received
self.soc_data.dataReceived.connect(self.get_socket_stream_data)
self.soc_data.sockConnected.connect(self.data_status_action)
print("Socket communication reinitialization done!")
def retrieve_configurations(self): # Function to retrieve configurations from the RPi side
# For the camera settings
cmd=self.format_command(self.socket_send_header_dict['RETRIEVE'],
[self.socket_send_header_dict['CAM_SET']])
self.send_socket_commands(cmd)
# For the gas threshold settings
cmd=self.format_command(self.socket_send_header_dict['RETRIEVE'],
[self.socket_send_header_dict['GAS_THR']])
self.send_socket_commands(cmd)
# For the particle threshold settings
cmd=self.format_command(self.socket_send_header_dict['RETRIEVE'],
[self.socket_send_header_dict['PAR_THR']])
self.send_socket_commands(cmd)
# For the SMS settings
cmd=self.format_command(self.socket_send_header_dict['RETRIEVE'],
[self.socket_send_header_dict['SMS']])
self.send_socket_commands(cmd)
# Functions to get the host and ports from user
def connection_save_clicked(self):
self.host=self.ui.hostLineEdit.text() # Get the text (Not sanitized)
self.port_data=self.ui.dataPortSpinBox.value()
self.port_img=self.ui.videoPortSpinBox.value()
self.config.set('Connection','host',str(self.host))
self.config.set('Connection','data_port',str(self.port_data))
self.config.set('Connection','video_port',str(self.port_img))
self.write_to_config_file()
cmd=self.format_command(self.socket_send_header_dict['COM_SET'],[self.host,self.port_data,self.port_img])
con_str=""
if self.ui.saveToClientcheckBox.isChecked(): # If 'save to client' is checked the settings are also sent to the client, otherwise they are only applied locally
self.send_socket_commands(cmd) # Send the command to change the host and the ports
self.ui.saveToClientcheckBox.setChecked(False)
con_str = "The connection setting will be sent to client"
else:
con_str = "Only applied locally"
self.display_info(self.app_msg, "New connection settings saved. Will be used on next reconnect. "+con_str)
def connection_reset_clicked(self):
self.get_connection_settings_cfg()
self.ui.hostLineEdit.setText(str(self.host))
self.ui.dataPortSpinBox.setValue(self.port_data)
self.ui.videoPortSpinBox.setValue(self.port_img)
self.display_info(self.app_msg,"The connection settings have been reset to previous values")
def get_connection_settings_cfg(self): # Function to get connection setting from config file
self.host = self.config.get('Connection', 'host')
self.port_data = self.config.getint('Connection', 'data_port')
self.port_img = self.config.getint('Connection', 'video_port')
def update_connection_info_bar(self): # Function to set the connection text under the video display area
self.ui.hostLabel.setText(str(self.host))
self.ui.dataPortLabel.setText(str(self.port_data))
self.ui.videoPortLabel.setText(str(self.port_img))
def update_host_and_ports_input_section(self): # Function to update the user input connection settings
self.ui.hostLineEdit.setText(str(self.host))
self.ui.dataPortSpinBox.setValue(self.port_data)
self.ui.videoPortSpinBox.setValue(self.port_img)
# Deep Neural Network support functions
def init_neural_net(self): # Initialize the neural network defined in the FumeBotDNN file
self.display_info(self.app_msg, "Trying to initialize DNN. Please wait...")
try:
if self.nn_active is True: # If the NN was active before, deactivate it before re-initializing
self.neural_net_activate_deactivate()
self.dnn=FumeBotDNN() # Load the Neural Network
self.dnn.dnnOutputKeyPress.connect(self.handle_dnn_key_press) # Connection to the DNN keypress signal
self.display_info(self.app_msg,"DNN module initialized successfully")
except Exception as e:
self.display_info(self.app_msg,"Loading the DNN module failed")
self.display_info(self.app_msg,str(e))
self.dnn=None
def neural_net_activate_deactivate(self):
if self.socket_img_connected is True: # Check if the socket for video frame is connected
if self.dnn is not None: # Check if initialization of DNN module was done
self.nn_active = not self.nn_active
if self.nn_active is True:
self.display_info(self.app_msg,"Deep Nerual Network activated")
self.ui.controlDNNButton.setText("Deactivate")
else:
self.display_info(self.app_msg,"Deep Nerual Network deactivated")
self.ui.controlDNNButton.setText("Activate")
self.key_pressed_dict=self.default_key_pressed_dict.copy() # Shallow copy is used to reset the dict
else:
self.display_info(self.app_msg,"DNN not initialized or an error occurred during initialization")
else:
self.display_info(self.app_msg,"Deep Neural Network cannot be activated. No video feed available")
# Capture stream functions
def get_socket_stream_frames(self, jpeg_bytes, therm_jpeg_bytes): # Gets the frames and updates the display
# This is the normal RGB camera image
if jpeg_bytes is not None:
if len(jpeg_bytes) > 0:
# Convert string bytes to image
temp_frame_bgr=cv2.imdecode(np.fromstring(jpeg_bytes,dtype=np.uint8),cv2.IMREAD_COLOR)
if temp_frame_bgr is not None: # If the above operation did not produce None
self.frame_bgr=temp_frame_bgr # The main frame for BGR data is updated
# Training images are collected from here
"""
Training image data is collected here from the original frame because resizing
the already resized final frame down to the small training size introduces
additional error and loss of detail; although minor, this can be avoided by
resizing the original frame instead.
"""
if self.enable_training_data_recording: # This produces a smaller copy of the current frame
self.frame_train_bgr=cv2.resize(self.frame_bgr, (self.train_frame_width, self.train_frame_height),
interpolation=cv2.INTER_LINEAR)
if self.nn_active: # Frames for the input to the neural network
# Preprocessing of image is all done in the DNN program to be presented to the NN
self.frame_dnn_bgr=self.frame_bgr.copy()
height, width = self.frame_bgr.shape[:2] # Get the size of the image
if width != 1280 and height != 720: # Resize the image to fit display area
self.frame_bgr=cv2.resize(self.frame_bgr,(1280, 720),interpolation=cv2.INTER_LINEAR)
# Thermal camera image
if therm_jpeg_bytes is not None:
if len(therm_jpeg_bytes) > 0: # This avoids corrupted or null byte strings
temp_frame_thermal=cv2.imdecode(np.fromstring(therm_jpeg_bytes,dtype=np.uint8),cv2.IMREAD_COLOR)
if temp_frame_thermal is not None: # If the above operation did not produce a none
self.frame_thermal=temp_frame_thermal # The main frame for thermal data is updated
self.frame_thermal_copy=self.frame_thermal.copy() # Copy the original frame for blend purposes
height, width = self.frame_thermal.shape[:2] # Get the size of the image
if width != 1280 and height != 720:
self.frame_thermal=cv2.resize(self.frame_thermal,(1280, 720),interpolation=cv2.INTER_LINEAR) # Resize the image to fit display area
# Display the image on the GUI according to the source selected
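# Blend pipeline: reshape the thermal frame to 16:9, scale it, pad it to 1280x720, translate it, then alpha-blend it with the RGB frame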
if self.normal_cam_enable is True and self.thermal_cam_enable is True: # Blend of two sources
aspect_changed=self.change_to_16by9_thermal_image(self.frame_thermal_copy)
scaled_result=self.scale_thermal_image(aspect_changed, self.therm_scale_val,scale_divider=10)
padded_result=self.pad_thermal_image(scaled_result)
trans_result=self.translate_thermal_image(padded_result,self.therm_pos_horz,self.therm_pos_vert)
dst=cv2.addWeighted(self.frame_bgr,float(self.norm_cam_weight/100.0),
trans_result,float(self.therm_cam_weight/100.0),0)
self.final_frame=dst
elif self.normal_cam_enable is True: # Just the normal camera
self.final_frame=self.frame_bgr
elif self.thermal_cam_enable is True: # Just the thermal camera
self.final_frame=self.frame_thermal
# The video frame update was normally done in this function but was moved to the update video feed function
def update_video_feed_display(self): # Function to update the video feed at whatever frame rate
"""
This function is called at a fixed interval to display the new
image received from the socket. This is not tied to the actual frame rate of the
images coming through the socket and therefore the same image can be shown multiple
times if new images are not received by the get_socket_stream_frames function.
Training data collection and video recording are also done here.
"""
if self.socket_img_connected is True:
frame=self.final_frame.copy() # Copy the frame
self.video_hud_display(frame) # Add the HUD elements to the frame
# Giving the frame to the Neural Network
if self.nn_active is True: # Can be run in the get_socket_stream_frames function
self.dnn.dnn_model_prediction(dnn_input=self.frame_dnn_bgr)
# The HUD elements are also saved
if self.enable_video_recording is True and not self.video_recording_paused:
# The video recording file is automatically opened in the below function
retval=self.vid_saver.save_frames_to_video_file(frame,timestamped=True) # Separate recording each time
self.ui.frameCountLabel.setText(str(self.vid_saver.get_frame_count()))
if retval is False: # If the recording is malfunctioning
self.display_info(self.app_msg,"Video recording failed, recording stopped")
self.enable_video_recording=False # Stop the recording
# The button state has to be changed here as well
# The training data is collected here
if self.enable_training_data_recording and not self.training_data_recording_paused:
if self.is_keys_active(): # If any of the keys are pressed
key_inputs=self.output_key_presses()
if not self.is_disabled_key_pressed(): # If none of the disabled keys are pressed then save
self.save_done, self.training_frame_count=self.train_data_saver.save_training_data(
train_input=self.frame_train_bgr,
train_label=key_inputs)
self.ui.trainingExampleCountLabel.setText(str(self.training_frame_count))
pix_frame=self.convert_frame_to_pix(frame) # Convert for the pix format required by Qt
self.ui.video_frame.setPixmap(pix_frame) # Finally update the video frame
else:
self.display_black_screen() # Display the black screen
def calc_interval_from_fps(self): # Function that calculates the interval for display update from FPS
interval=(1/float(self.display_fps))*1000.0 # Interval in millisecond
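# e.g. display_fps=30 gives an interval of roughly 33 ms between display refreshes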
interval=int(round(interval,0))
return interval # The interval between frame updates
@staticmethod
def convert_frame_to_pix(frame_bgr): # Function to convert cv2 frame to pix format to be displayed in GUI
frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB) # BGR to RGB
image = QtGui.QImage(frame_rgb, frame_rgb.shape[1], frame_rgb.shape[0], QtGui.QImage.Format_RGB888)
pixel = QtGui.QPixmap.fromImage(image)
return pixel # The pix formatted image ready to be displayed
def video_hud_display(self, frame_bgr): # Function to display Heads Up Display (HUD) elements
if frame_bgr is not None and self.ui.enableHUDCheckBox.isChecked():
if len(frame_bgr) > 0:
height, width=frame_bgr.shape[:2]
# CENTER part of the video display
# Drawing a cross-hair
cross_hair_size=40 # In pixels
cross_hair_thickness=1 # In pixels
cross_hair_color=(0,255,0)
cv2.line(frame_bgr, (int((width / 2) - cross_hair_size), int(height / 2)),
(int((width / 2) + cross_hair_size), int(height / 2)), cross_hair_color, cross_hair_thickness)
cv2.line(frame_bgr, (int((width / 2)), int((height / 2) - cross_hair_size)),
(int((width / 2)), int((height / 2) + cross_hair_size)), cross_hair_color, cross_hair_thickness)
# RIGHT part of the video display
# Display sensor readings on HUD
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.45
font_thickness = 1
font_color = (0, 255, 0)
RIGHT_MAX_TEXT = "XXXXX: 888888.88"
retval, baseline = cv2.getTextSize(RIGHT_MAX_TEXT, font, font_scale, font_thickness)
right_text_pw = width - retval[0]-10 # Place width
right_text_ph = 50 # Place height
text_line_space_r = baseline + retval[1]
text_vert_offset_r=0
string = "SENSOR DATA"