From 6b0b54692329b854f3fe58cb9bab9a9638532c95 Mon Sep 17 00:00:00 2001 From: darasafe Date: Sat, 24 Feb 2024 01:11:34 -0600 Subject: [PATCH 1/3] refactored --- refactored/LED.py | 45 +++++++ .../argument_parser.cpython-39.pyc | Bin 0 -> 1500 bytes .../__pycache__/data_log.cpython-39.pyc | Bin 0 -> 2678 bytes .../data_management.cpython-39.pyc | Bin 0 -> 2944 bytes .../image_processing.cpython-39.pyc | Bin 0 -> 1301 bytes .../__pycache__/logging_setup.cpython-39.pyc | Bin 0 -> 668 bytes .../__pycache__/model_config.cpython-39.pyc | Bin 0 -> 570 bytes .../power_management.cpython-39.pyc | Bin 0 -> 780 bytes refactored/__pycache__/run.cpython-39.pyc | Bin 0 -> 2241 bytes .../setup_directories.cpython-39.pyc | Bin 0 -> 1058 bytes .../__pycache__/setup_pipeline.cpython-39.pyc | Bin 0 -> 3090 bytes refactored/argument_parser.py | 25 ++++ refactored/capture.py | 39 ++++++ refactored/data_log.py | 70 +++++++++++ refactored/data_management.py | 113 ++++++++++++++++++ refactored/image_processing.py | 28 +++++ refactored/logging_setup.py | 17 +++ refactored/main-copy.py | 46 +++++++ refactored/main.py | 46 +++++++ refactored/model_config.py | 11 ++ refactored/power_management.py | 22 ++++ refactored/run.py | 97 +++++++++++++++ refactored/setup_directories.py | 20 ++++ refactored/setup_pipeline.py | 107 +++++++++++++++++ 24 files changed, 686 insertions(+) create mode 100644 refactored/LED.py create mode 100644 refactored/__pycache__/argument_parser.cpython-39.pyc create mode 100644 refactored/__pycache__/data_log.cpython-39.pyc create mode 100644 refactored/__pycache__/data_management.cpython-39.pyc create mode 100644 refactored/__pycache__/image_processing.cpython-39.pyc create mode 100644 refactored/__pycache__/logging_setup.cpython-39.pyc create mode 100644 refactored/__pycache__/model_config.cpython-39.pyc create mode 100644 refactored/__pycache__/power_management.cpython-39.pyc create mode 100644 refactored/__pycache__/run.cpython-39.pyc create mode 100644 refactored/__pycache__/setup_directories.cpython-39.pyc create mode 100644 refactored/__pycache__/setup_pipeline.cpython-39.pyc create mode 100644 refactored/argument_parser.py create mode 100644 refactored/capture.py create mode 100644 refactored/data_log.py create mode 100644 refactored/data_management.py create mode 100644 refactored/image_processing.py create mode 100644 refactored/logging_setup.py create mode 100644 refactored/main-copy.py create mode 100644 refactored/main.py create mode 100644 refactored/model_config.py create mode 100644 refactored/power_management.py create mode 100644 refactored/run.py create mode 100644 refactored/setup_directories.py create mode 100644 refactored/setup_pipeline.py diff --git a/refactored/LED.py b/refactored/LED.py new file mode 100644 index 0000000..5c3fd7e --- /dev/null +++ b/refactored/LED.py @@ -0,0 +1,45 @@ +import RPi.GPIO as GPIO +import time + +def run_LEDS(): + # Set the GPIO mode + GPIO.setmode(GPIO.BCM) + GPIO.setwarnings(False) + GPIO.cleanup() + + # Set the GPIO pins for the LEDs + UV_LED_PIN = 22 # UV light + WHITE_LED_PIN = 17 # White light + + + # Set the GPIO pins as outputs + GPIO.setup(UV_LED_PIN, GPIO.OUT) + GPIO.setup(WHITE_LED_PIN, GPIO.OUT) + + # Turn off the LEDs + GPIO.output(UV_LED_PIN, GPIO.LOW) + GPIO.output(WHITE_LED_PIN, GPIO.LOW) # Turn off the new LED + print("LEDs OFF") + + time.sleep(1) + + # Turn on the UV LED + GPIO.output(UV_LED_PIN, GPIO.HIGH) + print("UV LED ON - Attracting bugs") + + # Wait for 5 minutes before turning on the white LED + time.sleep(5) # 300 
seconds = 5 minutes (NOTE: time.sleep(5) as written waits only 5 s; a later commit changes this to 5*60)
+
+    # Turn on the white LED
+    GPIO.output(WHITE_LED_PIN, GPIO.HIGH)
+    print("White LED ON")
+
+    time.sleep(55)
+
+    # Turn off the LEDs
+    GPIO.output(UV_LED_PIN, GPIO.LOW)
+    GPIO.output(WHITE_LED_PIN, GPIO.LOW)  # Turn off the white LED
+    print("LEDs OFF")
+
+if __name__ == "__main__":
+    run_LEDS()
\ No newline at end of file
[GIT binary patches omitted: the ten refactored/__pycache__/*.cpython-39.pyc files added here contain only base85-encoded compiled bytecode with no human-readable content; file names and sizes are listed in the diffstat above]
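(Aside: the __pycache__ bytecode above is regenerated on every run; a .gitignore entry along these lines, an assumed addition that is not part of this patch, would keep it out of version control:

    __pycache__/
    *.pyc
)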
diff --git a/refactored/argument_parser.py b/refactored/argument_parser.py
new file mode 100644
index 0000000..ca44a86
--- /dev/null
+++ b/refactored/argument_parser.py
@@ -0,0 +1,25 @@
+import argparse
+
+def parse_arguments():
+    """
+    Parse command-line arguments for the application.
+    """
+    parser = argparse.ArgumentParser(description="Run object detection and tracking.")
+    parser.add_argument("-4k", "--four_k_resolution", action="store_true",
+                        help="crop detections from (+ save HQ frames in) 4K resolution; default = 1080p")
+    parser.add_argument("-crop", "--crop_bbox", choices=["square", "tight"], default="square", type=str,
+                        help="save cropped detections with aspect ratio 1:1 ('-crop square') or \
+                              keep original bbox size with variable aspect ratio ('-crop tight')")
+    parser.add_argument("-raw", "--save_raw_frames", action="store_true",
+                        help="additionally save full raw HQ frames in separate folder (e.g., for training data)")
+    parser.add_argument("-overlay", "--save_overlay_frames", action="store_true",
+                        help="additionally save full HQ frames with overlay (bbox + info) in separate folder")
+    parser.add_argument("-log", "--save_logs", action="store_true",
+                        help="save RPi CPU + OAK chip temperature, RPi available memory (MB) + \
+                              CPU utilization (%%) and battery info to .csv file")
+
+    args = parser.parse_args()
+    if args.save_logs:
+        # imported here only so that missing logging dependencies fail fast;
+        # the modules themselves are used in data_log.py
+        from apscheduler.schedulers.background import BackgroundScheduler
+        from gpiozero import CPUTemperature
+    return args
diff --git a/refactored/capture.py b/refactored/capture.py
new file mode 100644
index 0000000..9030805
--- /dev/null
+++ b/refactored/capture.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+from logging_setup import setup_logging
+from argument_parser import parse_arguments
+from power_management import check_system_resources
+from setup_pipeline import create_pipeline
+from setup_directories import setup_directories
+from data_management import store_data
+from run import run
+
+# Other imports remain the same
+# import csv, json, subprocess, sys, time, traceback, etc.
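+# Example invocation (hypothetical paths; the flags are defined in argument_parser.py):
+#   python3 capture.py -4k -crop tight -log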
+
+latest_images = {}
+image_count = {}  # Dictionary to keep track of image count for each track ID
+webhook_url = "https://nytelyfe-402203.uc.r.appspot.com/upload"  # Webhook URL
+
+def capture(args):
+
+    # Setup logging first
+    logger = setup_logging()
+
+    # Check system resources and manage power
+    pijuice, chargelevel_start = check_system_resources(logger)
+
+    # Create the DepthAI pipeline
+    pipeline, labels = create_pipeline(args.four_k_resolution)
+
+    # Set up data directories
+    save_path, rec_id, rec_start = setup_directories(labels, args.save_raw_frames, args.save_overlay_frames)
+
+    run(args.save_logs, args.save_raw_frames, args.save_overlay_frames, args.crop_bbox,
+        args.four_k_resolution, webhook_url, latest_images, image_count, labels, pijuice,
+        chargelevel_start, logger, pipeline, rec_id, rec_start, save_path)
+
+
+if __name__ == "__main__":
+    # Parse the command-line arguments
+    args = parse_arguments()
+    # Pass the parsed arguments to the capture function
+    capture(args)
diff --git a/refactored/data_log.py b/refactored/data_log.py
new file mode 100644
index 0000000..3d938c2
--- /dev/null
+++ b/refactored/data_log.py
@@ -0,0 +1,70 @@
+import csv
+import time
+from datetime import datetime
+import psutil
+from pathlib import Path
+from pijuice import PiJuice  # not used directly; a PiJuice instance is passed in by the caller
+from gpiozero import CPUTemperature
+import pandas as pd
+
+def record_log(rec_id, rec_start, save_path, chargelevel_start, chargelevel, start_time):
+    """Write information about each recording interval to .csv file."""
+    try:
+        df_meta = pd.read_csv(f"{save_path}/metadata_{rec_start}.csv", encoding="utf-8")
+        unique_ids = df_meta["track_ID"].nunique()
+    except pd.errors.EmptyDataError:
+        unique_ids = 0
+    with open(f"{save_path}/record_log.csv", "a", encoding="utf-8") as log_rec_file:
+        log_rec = csv.DictWriter(log_rec_file, fieldnames=[
+            "rec_ID", "record_start_date", "record_start_time", "record_end_time", "record_time_min",
+            "num_crops", "num_IDs", "disk_free_gb", "chargelevel_start", "chargelevel_end"
+        ])
+        if log_rec_file.tell() == 0:
+            log_rec.writeheader()
+        logs_rec = {
+            "rec_ID": rec_id,
+            "record_start_date": rec_start[:8],
+            "record_start_time": rec_start[9:],
+            "record_end_time": datetime.now().strftime("%H-%M"),
+            "record_time_min": round((time.monotonic() - start_time) / 60, 2),
+            "num_crops": len(list(Path(f"{save_path}/cropped").glob("**/*.jpg"))),
+            "num_IDs": unique_ids,
+            "disk_free_gb": round(psutil.disk_usage("/").free / 1073741824, 1),  # bytes -> GB
+            "chargelevel_start": chargelevel_start,
+            "chargelevel_end": chargelevel  # passed in as a parameter
+        }
+        log_rec.writerow(logs_rec)
+
+
+def save_logs(rec_id, rec_start, chargelevel, pijuice, device):
+    """Write recording ID, time, RPi CPU + OAK chip temperature, RPi available memory (MB) +
+    CPU utilization (%) and PiJuice battery info + temp to .csv file."""
+    with open(f"insect-detect/data/{rec_start[:8]}/info_log_{rec_start[:8]}.csv", "a", encoding="utf-8") as log_info_file:
+        log_info = csv.DictWriter(log_info_file, fieldnames=[
+            "rec_ID", "timestamp", "temp_pi", "temp_oak", "pi_mem_available", "pi_cpu_used",
+            "power_input", "charge_status", "charge_level", "temp_batt", "voltage_batt_mV"
+        ])
+        if log_info_file.tell() == 0:
+            log_info.writeheader()
+        try:
+            temp_oak = round(device.getChipTemperature().average)  # average OAK chip temperature
+        except RuntimeError:
+            temp_oak = "NA"
+        try:
+            logs_info = {
+                "rec_ID": rec_id,
+                "timestamp": 
datetime.now().strftime("%Y%m%d_%H-%M-%S"), + "temp_pi": round(CPUTemperature().temperature), + "temp_oak": temp_oak, + "pi_mem_available": round(psutil.virtual_memory().available / 1048576), + "pi_cpu_used": psutil.cpu_percent(interval=None), + "power_input": pijuice.status.GetStatus().get("data", {}).get("powerInput", "NA"), + "charge_status": pijuice.status.GetStatus().get("data", {}).get("battery", "NA"), + "charge_level": chargelevel, + "temp_batt": pijuice.status.GetBatteryTemperature().get("data", "NA"), + "voltage_batt_mV": pijuice.status.GetBatteryVoltage().get("data", "NA") + } + except IndexError: + logs_info = {} + log_info.writerow(logs_info) + log_info_file.flush() diff --git a/refactored/data_management.py b/refactored/data_management.py new file mode 100644 index 0000000..674fe77 --- /dev/null +++ b/refactored/data_management.py @@ -0,0 +1,113 @@ +import csv +from datetime import datetime +import cv2 +import requests +from image_processing import frame_norm, make_bbox_square + +def store_data(frame, tracks, rec_id, rec_start, save_path, labels, save_raw_frames, save_overlay_frames, crop_bbox, four_k_resolution, webhook_url, latest_images, image_count): + + """Save cropped detections (+ full HQ frames) to .jpg and tracker output to metadata .csv.""" + with open(f"{save_path}/metadata_{rec_start}.csv", "a", encoding="utf-8") as metadata_file: + metadata = csv.DictWriter(metadata_file, fieldnames= + ["rec_ID", "timestamp", "label", "confidence", "track_ID", + "x_min", "y_min", "x_max", "y_max", "file_path"]) + if metadata_file.tell() == 0: + metadata.writeheader() # write header only once + + # Save full raw HQ frame (e.g. for training data collection) + if save_raw_frames: + for track in tracks: + if track == tracks[-1]: + timestamp = datetime.now().strftime("%Y%m%d_%H-%M-%S.%f") + raw_path = f"{save_path}/raw/{timestamp}_raw.jpg" + cv2.imwrite(raw_path, frame) + #cv2.imwrite(raw_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 70]) + + for track in tracks: + # Don't save cropped detections if tracking status == "NEW" or "LOST" or "REMOVED" + if track.status.name == "TRACKED": + + # Save detections cropped from HQ frame to .jpg + bbox = frame_norm(frame, (track.srcImgDetection.xmin, track.srcImgDetection.ymin, + track.srcImgDetection.xmax, track.srcImgDetection.ymax)) + if crop_bbox == "square": + det_crop = make_bbox_square(frame, bbox, four_k_resolution) + else: + det_crop = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]] + label = labels[track.srcImgDetection.label] + timestamp = datetime.now().strftime("%Y%m%d_%H-%M-%S.%f") + crop_path = f"{save_path}/cropped/{label}/{timestamp}_{track.id}_crop.jpg" + cv2.imwrite(crop_path, det_crop) + + # Update the latest image for this track.id + latest_images[track.id] = crop_path + + # Update image count for this track.id + image_count[track.id] = image_count.get(track.id, 0) + 1 + print(f"Image count for track.id {track.id}: {image_count[track.id]}") + + + + if image_count[track.id] == 3: + try: + with open(crop_path, 'rb') as f: + #Open metadata CSV + #with open(f"{save_path}/metadata_{rec_start}.csv", 'rb') as metadata_file: + # Prepare the files to be sent + files = {'file': f} + #'metadata': ('metadata.csv', metadata_file) + + data = { + 'accountID': 'Y7I3Jmp7dCXoank4WXKeTCSoPDp1' # Replace with your actual account ID + } + response = requests.post(webhook_url, files=files, data=data) + + if response.status_code == 200: + print(f"Successfully sent {crop_path} to webhook.") + else: + print(f"Failed to send image to webhook. 
Status code: {response.status_code}")
+                    except Exception as e:
+                        print(f"An error occurred: {e}")
+
+            # Save corresponding metadata to .csv file for each cropped detection
+            data = {
+                "rec_ID": rec_id,
+                "timestamp": timestamp,
+                "label": label,
+                "confidence": round(track.srcImgDetection.confidence, 2),
+                "track_ID": track.id,
+                "x_min": round(track.srcImgDetection.xmin, 4),
+                "y_min": round(track.srcImgDetection.ymin, 4),
+                "x_max": round(track.srcImgDetection.xmax, 4),
+                "y_max": round(track.srcImgDetection.ymax, 4),
+                "file_path": crop_path
+            }
+            metadata.writerow(data)
+            metadata_file.flush()  # write data immediately to .csv to avoid potential data loss
+
+            # Save full HQ frame with overlay (bounding box, label, confidence, tracking ID) drawn on frame
+            if save_overlay_frames:
+                # Text position, font size and thickness optimized for 1920x1080 px HQ frame size
+                if not four_k_resolution:
+                    cv2.putText(frame, labels[track.srcImgDetection.label], (bbox[0], bbox[3] + 28),
+                                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
+                    cv2.putText(frame, f"{round(track.srcImgDetection.confidence, 2)}", (bbox[0], bbox[3] + 55),
+                                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
+                    cv2.putText(frame, f"ID:{track.id}", (bbox[0], bbox[3] + 92),
+                                cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 255, 255), 2)
+                    cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
+                # Text position, font size and thickness optimized for 3840x2160 px HQ frame size
+                else:
+                    cv2.putText(frame, labels[track.srcImgDetection.label], (bbox[0], bbox[3] + 48),
+                                cv2.FONT_HERSHEY_SIMPLEX, 1.7, (255, 255, 255), 3)
+                    cv2.putText(frame, f"{round(track.srcImgDetection.confidence, 2)}", (bbox[0], bbox[3] + 98),
+                                cv2.FONT_HERSHEY_SIMPLEX, 1.6, (255, 255, 255), 3)
+                    cv2.putText(frame, f"ID:{track.id}", (bbox[0], bbox[3] + 164),
+                                cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
+                    cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 3)
+                if track == tracks[-1]:
+                    timestamp = datetime.now().strftime("%Y%m%d_%H-%M-%S.%f")
+                    overlay_path = f"{save_path}/overlay/{timestamp}_overlay.jpg"
+                    cv2.imwrite(overlay_path, frame)
+                    #cv2.imwrite(overlay_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 70])
\ No newline at end of file
diff --git a/refactored/image_processing.py b/refactored/image_processing.py
new file mode 100644
index 0000000..62c668a
--- /dev/null
+++ b/refactored/image_processing.py
@@ -0,0 +1,28 @@
+import numpy as np
+
+def frame_norm(frame, bbox):
+    """Convert relative bounding box coordinates (0-1) to pixel coordinates."""
+    norm_vals = np.full(len(bbox), frame.shape[0])
+    norm_vals[::2] = frame.shape[1]
+    return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)
+
+def make_bbox_square(frame, bbox, resolution):
+    """Increase bbox size on both sides of the minimum dimension, or only on one side if localized at frame margin."""
+    # NOTE: the caller passes the boolean four_k_resolution flag as 'resolution', not a
+    # (width, height) tuple, so frame bounds are derived from the frame itself
+    frame_height, frame_width = frame.shape[:2]
+    bbox_width = bbox[2] - bbox[0]
+    bbox_height = bbox[3] - bbox[1]
+    bbox_diff = (max(bbox_width, bbox_height) - min(bbox_width, bbox_height)) // 2
+    if bbox_width < bbox_height:
+        if bbox[0] - bbox_diff < 0:
+            det_crop = frame[bbox[1]:bbox[3], 0:bbox[2] + (bbox_diff * 2 - bbox[0])]
+        elif bbox[2] + bbox_diff > frame_width:
+            det_crop = frame[bbox[1]:bbox[3], bbox[0] - (bbox_diff * 2 - (frame_width - bbox[2])):frame_width]
+        else:
+            det_crop = frame[bbox[1]:bbox[3], bbox[0] - bbox_diff:bbox[2] + bbox_diff]
+    else:
+        if bbox[1] - bbox_diff < 0:
+            det_crop = frame[0:bbox[3] + (bbox_diff * 2 - bbox[1]), bbox[0]:bbox[2]]
+        elif bbox[3] + bbox_diff > frame_height:
+            det_crop = frame[bbox[1] - (bbox_diff * 2 - (frame_height - bbox[3])):frame_height, bbox[0]:bbox[2]]
+        else:
+            det_crop = frame[bbox[1] - bbox_diff:bbox[3] + bbox_diff, bbox[0]:bbox[2]]
+    return det_crop
diff --git a/refactored/logging_setup.py b/refactored/logging_setup.py
new file mode 100644
index 0000000..1b76c8d
--- /dev/null
+++ b/refactored/logging_setup.py
@@ -0,0 +1,17 @@
+import logging
+import sys
+from pathlib import Path
+
+def setup_logging():
+    # Create folder to save images + metadata + logs (if not already present)
+    Path("insect-detect/data").mkdir(parents=True, exist_ok=True)
+
+    # Create logger and write info + error messages to log file
+    logging.basicConfig(filename="insect-detect/data/script_log.log", encoding="utf-8",
+                        format="%(asctime)s - %(levelname)s: %(message)s", level=logging.INFO)
+    logger = logging.getLogger()
+    sys.stderr.write = logger.error  # redirect stderr so uncaught errors end up in the log file
+    # Inform that logging has been configured
+    logging.info("Logging is configured.")
+
+    return logger
diff --git a/refactored/main-copy.py b/refactored/main-copy.py
new file mode 100644
index 0000000..8cb1f18
--- /dev/null
+++ b/refactored/main-copy.py
@@ -0,0 +1,46 @@
+import time
+import subprocess
+import threading
+import logging
+
+def run_led_script():
+    try:
+        import LED
+        logging.info("LED script running...")
+        LED.run_LEDS()
+        logging.info("LED script finished.")
+    except Exception as e:
+        logging.error(f"Error running LED script: {e}")
+        raise
+
+def capture_script():
+    try:
+        logging.info("Capture script starting...")
+        subprocess.run(["python3", "insect-detect/capture.py"])
+        logging.info("Capture script finished.")
+    except Exception as e:
+        logging.error(f"Error running capture script: {e}")
+        raise
+
+def main():
+    logging.basicConfig(level=logging.INFO)
+
+    # Initialize and start LED thread
+    led_thread = threading.Thread(target=run_led_script)
+    logging.info("Starting LED script")
+    led_thread.start()
+
+    # Initialize capture thread and start it after a delay
+    time.sleep(3)  # Keep this if you need a delay before starting capture
+    capture_thread = threading.Thread(target=capture_script)
+    logging.info("Starting capture script")
+    capture_thread.start()
+
+    # Wait for both threads to complete
+    led_thread.join()
+    capture_thread.join()
+
+    logging.info("Main script execution complete")
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/refactored/main.py b/refactored/main.py
new file mode 100644
index 0000000..022d3a7
--- /dev/null
+++ b/refactored/main.py
@@ -0,0 +1,46 @@
+import time
+import subprocess
+import threading
+import logging
+
+def run_led_script():
+    try:
+        import LED
+        logging.info("LED script running...")
+        LED.run_LEDS()
+        logging.info("LED script finished.")
+    except Exception as e:
+        logging.error(f"Error running LED script: {e}")
+        raise
+
+def capture_script():
+    try:
+        logging.info("Capture script starting...")
+        subprocess.run(["python3", "insect-detect/capture.py"])
+        logging.info("Capture script finished.")
+    except Exception as e:
+        logging.error(f"Error running capture script: {e}")
+        raise
+
+def main():
+    logging.basicConfig(level=logging.INFO)
+
+    # Start LED script in a separate thread
+    led_thread = threading.Thread(target=run_led_script)
+    logging.info("Starting LED script")
+    led_thread.start()
+
+    # Wait 3 seconds before starting the capture script
+    time.sleep(3)
+
+    # Start capture script and wait for it to complete
+    logging.info("Starting capture script")
script") + capture_script() + + # Wait for the LED script to finish if it's still running + led_thread.join() + + logging.info("Main script execution complete") + +if __name__ == "__main__": + main() diff --git a/refactored/model_config.py b/refactored/model_config.py new file mode 100644 index 0000000..d7e9430 --- /dev/null +++ b/refactored/model_config.py @@ -0,0 +1,11 @@ +# model_config.py +from pathlib import Path +import json + +MODEL_PATH = Path("insect-detect/models/yolov5n_320_openvino_2022.1_4shave.blob") +CONFIG_PATH = Path("insect-detect/models/json/yolov5_v7_320.json") + +def load_model_config(): + with CONFIG_PATH.open(encoding="utf-8") as f: + config = json.load(f) + return MODEL_PATH, config diff --git a/refactored/power_management.py b/refactored/power_management.py new file mode 100644 index 0000000..dbaeeea --- /dev/null +++ b/refactored/power_management.py @@ -0,0 +1,22 @@ +from pijuice import PiJuice +import psutil +import time +import subprocess +from pathlib import Path + +def check_system_resources(logger): + # Instantiate PiJuice + pijuice = PiJuice(1, 0x14) + + # Continue script only if battery charge level and free disk space (MB) are higher than thresholds + chargelevel_start = pijuice.status.GetChargeLevel().get("data", -1) + disk_free = round(psutil.disk_usage("/").free / 1048576) + if chargelevel_start < 10 or disk_free < 200: + logger.info(f"Shut down without recording | Charge level: {chargelevel_start}%\n") + subprocess.run(["sudo", "shutdown", "-h", "now"], check=True) + time.sleep(5) # wait 5 seconds for RPi to shut down + + # Optional: Disable charging of PiJuice battery if charge level is higher than threshold + #if chargelevel_start > 80: + # pijuice.config.SetChargingConfig({"charging_enabled": False}) + return pijuice, chargelevel_start \ No newline at end of file diff --git a/refactored/run.py b/refactored/run.py new file mode 100644 index 0000000..595c90c --- /dev/null +++ b/refactored/run.py @@ -0,0 +1,97 @@ +import time +import logging +import depthai as dai +from apscheduler.schedulers.background import BackgroundScheduler +import subprocess +import traceback +from data_log import save_logs, record_log +from data_management import store_data +# Ensure that save_logs and record_log functions are imported or defined here +# from your_logging_module import save_logs, record_log +# from your_data_handling_module import store_data +# Ensure the args variable is accessible if needed, consider passing it as a parameter +def run(save_logs, save_raw_frames, save_overlay_frames, crop_bbox, four_k_resolution, webhook_url, latest_images, image_count, labels, pijuice, chargelevel_start, logger, pipeline, rec_id, rec_start, save_path): + # Connect to OAK device and start pipeline in USB2 mode + + + with dai.Device(pipeline, maxUsbSpeed=dai.UsbSpeed.HIGH) as device: + + # Write RPi + OAK + battery info to .csv log file at specified interval + if save_logs: + logging.getLogger("apscheduler").setLevel(logging.WARNING) + scheduler = BackgroundScheduler() + scheduler.add_job(save_logs, "interval", seconds=30, id="log") + scheduler.start() + + # Create empty list to save charge level (if < 10) and set charge level + lst_chargelevel = [] + chargelevel = chargelevel_start + + # Set recording time conditional on PiJuice battery charge level + if chargelevel >= 70: + rec_time = 60 * 40 + elif 50 <= chargelevel < 70: + rec_time = 60 * 30 + elif 30 <= chargelevel < 50: + rec_time = 60 * 20 + elif 15 <= chargelevel < 30: + rec_time = 60 * 10 + else: + rec_time = 60 * 5 + 
+ # Write info on start of recording to log file + logger.info(f"Rec ID: {rec_id} | Rec time: {int(rec_time / 60)} min | Charge level: {chargelevel}%") + + # Create output queues to get the frames and tracklets + detections from the outputs defined above + q_frame = device.getOutputQueue(name="frame", maxSize=4, blocking=False) + q_track = device.getOutputQueue(name="track", maxSize=4, blocking=False) + + # Set start time of recording + start_time = time.monotonic() + + try: + # Record until recording time is finished or charge level dropped below threshold for 10 times + while time.monotonic() < start_time + rec_time and len(lst_chargelevel) < 10: + + # Update charge level (return "99" if not readable and write to list if < 10) + chargelevel = pijuice.status.GetChargeLevel().get("data", 99) + if chargelevel < 10: + lst_chargelevel.append(chargelevel) + + # Get synchronized HQ frames + tracker output (passthrough detections) + if q_frame.has(): + frame = q_frame.get().getCvFrame() + + if q_track.has(): + tracks = q_track.get().tracklets + + # Save cropped detections (slower if saving additional HQ frames) + store_data(frame, tracks, rec_id, rec_start, save_path, labels, save_raw_frames, save_overlay_frames, crop_bbox, four_k_resolution, webhook_url, latest_images, image_count) + + # Wait for 1 second + time.sleep(1) + + # Write info on end of recording to log file and write record logs to .csv + logger.info(f"Recording {rec_id} finished | Charge level: {chargelevel}%\n") + record_log(rec_id, rec_start, save_path, chargelevel_start, chargelevel, start_time) + + # Enable charging of PiJuice battery if charge level is lower than threshold + if chargelevel < 80: + pijuice.config.SetChargingConfig({"charging_enabled": True}) + + # Shutdown Raspberry Pi + subprocess.run(["sudo", "shutdown", "-h", "now"], check=True) + + # Write info on error during recording to log file and write record logs to .csv + except Exception: + logger.error(traceback.format_exc()) + logger.error(f"Error during recording {rec_id} | Charge level: {chargelevel}%\n") + record_log(rec_id, rec_start, save_path, chargelevel_start, chargelevel, start_time) + + # Enable charging of PiJuice battery if charge level is lower than threshold + if chargelevel < 80: + pijuice.config.SetChargingConfig({"charging_enabled": True}) + + # Shutdown Raspberry Pi + subprocess.run(["sudo", "shutdown", "-h", "now"], check=True) + return frame, tracks \ No newline at end of file diff --git a/refactored/setup_directories.py b/refactored/setup_directories.py new file mode 100644 index 0000000..e889de0 --- /dev/null +++ b/refactored/setup_directories.py @@ -0,0 +1,20 @@ +from datetime import datetime +from pathlib import Path + + # Create new folders for each day, recording interval and object class +def setup_directories(labels, save_raw, save_overlay): + rec_start = datetime.now().strftime("%Y%m%d_%H-%M") + save_path = f"insect-detect/data/{rec_start[:8]}/{rec_start}" + for text in labels: + Path(f"{save_path}/cropped/{text}").mkdir(parents=True, exist_ok=True) + if save_raw: + Path(f"{save_path}/raw").mkdir(parents=True, exist_ok=True) + if save_overlay: + Path(f"{save_path}/overlay").mkdir(parents=True, exist_ok=True) + + # Calculate current recording ID by subtracting number of directories with date-prefix + folders_dates = len([f for f in Path("insect-detect/data").glob("**/20*") if f.is_dir()]) + folders_days = len([f for f in Path("insect-detect/data").glob("20*") if f.is_dir()]) + rec_id = folders_dates - folders_days + + return save_path, 
rec_id, rec_start \ No newline at end of file diff --git a/refactored/setup_pipeline.py b/refactored/setup_pipeline.py new file mode 100644 index 0000000..9e8c3ee --- /dev/null +++ b/refactored/setup_pipeline.py @@ -0,0 +1,107 @@ +import depthai as dai +from model_config import load_model_config + +def create_pipeline(four_k_resolution): + model_path, config = load_model_config() + + # Get detection model metadata from config JSON + nn_config = config.get("nn_config", {}) + nn_metadata = nn_config.get("NN_specific_metadata", {}) + classes = nn_metadata.get("classes", {}) + coordinates = nn_metadata.get("coordinates", {}) + anchors = nn_metadata.get("anchors", {}) + anchor_masks = nn_metadata.get("anchor_masks", {}) + iou_threshold = nn_metadata.get("iou_threshold", {}) + confidence_threshold = nn_metadata.get("confidence_threshold", {}) + nn_mappings = config.get("mappings", {}) + labels = nn_mappings.get("labels", {}) + + # Create depthai pipeline + pipeline = dai.Pipeline() + + # Create and configure camera node + cam_rgb = pipeline.create(dai.node.ColorCamera) + #cam_rgb.initialControl.setAutoFocusLensRange(142,146) # platform ~9.5 inches from the camera + #cam_rgb.initialControl.setManualFocus(143) # platform ~9.5 inches from the camera + cam_rgb.initialControl.setManualExposure(80000,400) + #cam_rgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) + + + if not four_k_resolution: + cam_rgb.setIspScale(1, 2) # downscale 4K to 1080p HQ frames (1920x1080 px) + cam_rgb.setPreviewSize(320, 320) # downscaled LQ frames for model input + cam_rgb.setPreviewKeepAspectRatio(False) # "squeeze" frames (16:9) to square (1:1) + cam_rgb.setInterleaved(False) # planar layout + cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) + + cam_rgb.setFps(10) # frames per second available for focus/exposure/model input + + # Create detection network node and define input + nn = pipeline.create(dai.node.YoloDetectionNetwork) + cam_rgb.preview.link(nn.input) # downscaled LQ frames as model input + nn.input.setBlocking(False) + + # Set detection model specific settings + nn.setBlobPath(model_path) + nn.setNumClasses(classes) + nn.setCoordinateSize(coordinates) + nn.setAnchors(anchors) + nn.setAnchorMasks(anchor_masks) + nn.setIouThreshold(iou_threshold) + nn.setConfidenceThreshold(confidence_threshold) + nn.setNumInferenceThreads(2) + + # Create and configure object tracker node and define inputs + tracker = pipeline.create(dai.node.ObjectTracker) + tracker.setTrackerType(dai.TrackerType.ZERO_TERM_IMAGELESS) + #tracker.setTrackerType(dai.TrackerType.SHORT_TERM_IMAGELESS) # better for low fps + tracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.UNIQUE_ID) + nn.passthrough.link(tracker.inputTrackerFrame) + nn.passthrough.link(tracker.inputDetectionFrame) + nn.out.link(tracker.inputDetections) + + # Create script node and define inputs + script = pipeline.create(dai.node.Script) + script.setProcessor(dai.ProcessorType.LEON_CSS) + cam_rgb.video.link(script.inputs["frames"]) # HQ frames + script.inputs["frames"].setBlocking(False) + tracker.out.link(script.inputs["tracker"]) # tracklets + passthrough detections + script.inputs["tracker"].setBlocking(False) + + # Set script that will be run on-device (Luxonis OAK) + script.setScript(''' + # Create empty list to save HQ frames + sequence numbers + lst = [] + + def get_synced_frame(track_seq): + """Compare tracker with frame sequence number and send frame 
if equal.""" + global lst + for i, frame in enumerate(lst): + if track_seq == frame.getSequenceNum(): + lst = lst[i:] + break + return lst[0] + + # Sync tracker output with HQ frames + while True: + lst.append(node.io["frames"].get()) + tracks = node.io["tracker"].tryGet() + if tracks is not None: + track_seq = node.io["tracker"].get().getSequenceNum() + if len(lst) == 0: continue + node.io["frame_out"].send(get_synced_frame(track_seq)) + node.io["track_out"].send(tracks) + lst.pop(0) # remove synchronized frame from the list + ''') + + # Define script node outputs + xout_rgb = pipeline.create(dai.node.XLinkOut) + xout_rgb.setStreamName("frame") + script.outputs["frame_out"].link(xout_rgb.input) # synced HQ frames + + xout_tracker = pipeline.create(dai.node.XLinkOut) + xout_tracker.setStreamName("track") + script.outputs["track_out"].link(xout_tracker.input) # synced tracker output + + return pipeline, labels \ No newline at end of file From c2efe27d77a90b0bc0d8dce4aaff31b3bb0f6020 Mon Sep 17 00:00:00 2001 From: darasafe Date: Sat, 24 Feb 2024 01:25:21 -0600 Subject: [PATCH 2/3] refactored main script --- refactored/LED.py | 45 ---------------------------------------- refactored/main-copy.py | 46 ----------------------------------------- refactored/main.py | 46 ----------------------------------------- 3 files changed, 137 deletions(-) delete mode 100644 refactored/LED.py delete mode 100644 refactored/main-copy.py delete mode 100644 refactored/main.py diff --git a/refactored/LED.py b/refactored/LED.py deleted file mode 100644 index 5c3fd7e..0000000 --- a/refactored/LED.py +++ /dev/null @@ -1,45 +0,0 @@ -import RPi.GPIO as GPIO -import time - -def run_LEDS(): - # Set the GPIO mode - GPIO.setmode(GPIO.BCM) - GPIO.setwarnings(False) - GPIO.cleanup() - - # Set the GPIO pins for the LEDs - UV_LED_PIN = 22 # UV light - WHITE_LED_PIN = 17 # White light - - - # Set the GPIO pins as outputs - GPIO.setup(UV_LED_PIN, GPIO.OUT) - GPIO.setup(WHITE_LED_PIN, GPIO.OUT) - - # Turn off the LEDs - GPIO.output(UV_LED_PIN, GPIO.LOW) - GPIO.output(WHITE_LED_PIN, GPIO.LOW) # Turn off the new LED - print("LEDs OFF") - - time.sleep(1) - - # Turn on the UV LED - GPIO.output(UV_LED_PIN, GPIO.HIGH) - print("UV LED ON - Attracting bugs") - - # Wait for 5 minutes before turning on the white LED - time.sleep(5) # 300 seconds = 5 minutes - - # Turn on the white LED - GPIO.output(WHITE_LED_PIN, GPIO.HIGH) - print("White LED ON") - - time.sleep(55) - - # Turn off the LEDs - GPIO.output(UV_LED_PIN, GPIO.LOW) - GPIO.output(WHITE_LED_PIN, GPIO.LOW) # Turn off the new LED - print("LEDs OFF") - -if __name__ == "__main__": - run_LEDS() \ No newline at end of file diff --git a/refactored/main-copy.py b/refactored/main-copy.py deleted file mode 100644 index 8cb1f18..0000000 --- a/refactored/main-copy.py +++ /dev/null @@ -1,46 +0,0 @@ -import time -import subprocess -import threading -import logging - -def run_led_script(): - try: - import LED - logging.info("LED script running...") - LED.run_LEDS() - logging.info("LED script finished.") - except Exception as e: - logging.error(f"Error running LED script: {e}") - raise - -def capture_script(): - try: - logging.info("Capture script starting...") - subprocess.run(["python3", "insect-detect/capture.py"]) - logging.info("Capture script finished.") - except Exception as e: - logging.error(f"Error running capture script: {e}") - raise - -def main(): - logging.basicConfig(level=logging.INFO) - - # Initialize and start LED thread - led_thread = threading.Thread(target=run_led_script) - 
logging.info("Starting LED script") - led_thread.start() - - # Initialize capture thread and start it after a delay - time.sleep(3) # Keep this if you need a delay before starting capture - capture_thread = threading.Thread(target=capture_script) - logging.info("Starting capture script") - capture_thread.start() - - # Wait for both threads to complete - led_thread.join() - capture_thread.join() - - logging.info("Main script execution complete") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/refactored/main.py b/refactored/main.py deleted file mode 100644 index 022d3a7..0000000 --- a/refactored/main.py +++ /dev/null @@ -1,46 +0,0 @@ -import time -import subprocess -import threading -import logging - -def run_led_script(): - try: - import LED - logging.info("LED script running...") - LED.run_LEDS() - logging.info("LED script finished.") - except Exception as e: - logging.error(f"Error running LED script: {e}") - raise - -def capture_script(): - try: - logging.info("Capture script starting...") - subprocess.run(["python3", "insect-detect/capture.py"]) - logging.info("Capture script finished.") - except Exception as e: - logging.error(f"Error running capture script: {e}") - raise - -def main(): - logging.basicConfig(level=logging.INFO) - - # Start LED script in a separate thread - led_thread = threading.Thread(target=run_led_script) - logging.info("Starting LED script") - led_thread.start() - - # Wait for 1 minute before starting the capture script - time.sleep(3) - - # Start capture script and wait for it to complete - logging.info("Starting capture script") - capture_script() - - # Wait for the LED script to finish if it's still running - led_thread.join() - - logging.info("Main script execution complete") - -if __name__ == "__main__": - main() From 720ab69ca849fc4b8c285b6d98014218a2e221d4 Mon Sep 17 00:00:00 2001 From: darasafe Date: Thu, 4 Jul 2024 19:05:18 -0500 Subject: [PATCH 3/3] new run script --- LED.py | 45 +++ LEDoff.py | 27 ++ main.py | 70 +++++ yolo_tracker_save_hqsync.py | 41 ++- yolo_tracker_save_hqsync_pijuice.py | 422 --------------------------- yolo_tracker_save_hqsync_wittypi.py | 423 ---------------------------- 6 files changed, 181 insertions(+), 847 deletions(-) create mode 100644 LED.py create mode 100644 LEDoff.py create mode 100644 main.py delete mode 100644 yolo_tracker_save_hqsync_pijuice.py delete mode 100644 yolo_tracker_save_hqsync_wittypi.py diff --git a/LED.py b/LED.py new file mode 100644 index 0000000..eb10826 --- /dev/null +++ b/LED.py @@ -0,0 +1,45 @@ +import RPi.GPIO as GPIO +import time + +def run_LEDS(): + # Set the GPIO mode + GPIO.setmode(GPIO.BCM) + GPIO.setwarnings(False) + GPIO.cleanup() + + # Set the GPIO pins for the LEDs + UV_LED_PIN = 22 # UV light + WHITE_LED_PIN = 17 # White light + + + # Set the GPIO pins as outputs + GPIO.setup(UV_LED_PIN, GPIO.OUT) + GPIO.setup(WHITE_LED_PIN, GPIO.OUT) + + # Turn off the LEDs + GPIO.output(UV_LED_PIN, GPIO.LOW) + GPIO.output(WHITE_LED_PIN, GPIO.LOW) # Turn off the new LED + print("LEDs OFF") + + time.sleep(1) + + # Turn on the UV LED + GPIO.output(UV_LED_PIN, GPIO.HIGH) + print("UV LED ON - Attracting bugs") + + # Wait for 5 minutes before turning on the white LED + time.sleep(5*60) # 300 seconds = 5 minutes + + # Turn on the white LED + GPIO.output(WHITE_LED_PIN, GPIO.HIGH) + print("White LED ON") + + time.sleep(600) + + # Turn off the LEDs + GPIO.output(UV_LED_PIN, GPIO.LOW) + GPIO.output(WHITE_LED_PIN, GPIO.LOW) # Turn off the new LED + print("LEDs OFF") + +if __name__ == 
"__main__": + run_LEDS() \ No newline at end of file diff --git a/LEDoff.py b/LEDoff.py new file mode 100644 index 0000000..023e272 --- /dev/null +++ b/LEDoff.py @@ -0,0 +1,27 @@ +import RPi.GPIO as GPIO +import time + +def run_LEDS(): + # Set the GPIO mode + GPIO.setmode(GPIO.BCM) + GPIO.setwarnings(False) + GPIO.cleanup() + + # Set the GPIO pins for the LEDs + UV_LED_PIN = 22 # UV light + WHITE_LED_PIN = 17 # White light + + + # Set the GPIO pins as outputs + GPIO.setup(UV_LED_PIN, GPIO.OUT) + GPIO.setup(WHITE_LED_PIN, GPIO.OUT) + + # Turn off the LEDs + GPIO.output(UV_LED_PIN, GPIO.LOW) + GPIO.output(WHITE_LED_PIN, GPIO.LOW) # Turn off the new LED + print("LEDs OFF") + + time.sleep(5) + +if __name__ == "__main__": + run_LEDS() \ No newline at end of file diff --git a/main.py b/main.py new file mode 100644 index 0000000..a73c071 --- /dev/null +++ b/main.py @@ -0,0 +1,70 @@ +import time +import subprocess +import threading +import logging +import signal + +def run_led_script(): + try: + import LED + logging.info("LED script running...") + LED.run_LEDS() + logging.info("LED script finished.") + except Exception as e: + logging.error(f"Error running LED script: {e}") + raise + +def capture_script(): + try: + logging.info("Capture script starting...") + process = subprocess.Popen(["python3", "insect-detect/yolo_tracker_save_hqsync.py --4k"]) + return process + except Exception as e: + logging.error(f"Error running capture script: {e}") + raise + +def terminate_process(process): + try: + process.terminate() + process.wait(timeout=5) + logging.info("Capture script terminated.") + except Exception as e: + logging.error(f"Error terminating capture script: {e}") + raise + +def main(): + logging.basicConfig(level=logging.INFO) + start_time = time.time() + duration = 2 * 60 * 60 # 2 hours in seconds + + while time.time() - start_time < duration: + # Start LED script in a separate thread + led_thread = threading.Thread(target=run_led_script) + logging.info("Starting LED script") + led_thread.start() + + # Wait for 5 minutes before starting the capture script + time.sleep(5 * 60) + + # Start capture script in a separate process + logging.info("Starting capture script") + capture_process = capture_script() + + # Run capture script for 15-20 minutes + capture_duration = 15 * 60 # 15 minutes in seconds + time.sleep(capture_duration) + + # Terminate the capture script + logging.info("Terminating capture script") + terminate_process(capture_process) + + # Wait for the LED script to finish if it's still running + led_thread.join() + + # Wait for 10 minutes break before the next cycle + time.sleep(10 * 60) + + logging.info("Main script execution complete after 2 hours") + +if __name__ == "__main__": + main() diff --git a/yolo_tracker_save_hqsync.py b/yolo_tracker_save_hqsync.py index 74a58db..2df17b0 100644 --- a/yolo_tracker_save_hqsync.py +++ b/yolo_tracker_save_hqsync.py @@ -74,6 +74,7 @@ import time from datetime import datetime, timedelta from pathlib import Path +import requests import depthai as dai import psutil @@ -86,7 +87,7 @@ # Define optional arguments parser = argparse.ArgumentParser() -parser.add_argument("-min", "--min_rec_time", type=int, choices=range(1, 721), default=2, +parser.add_argument("-min", "--min_rec_time", type=int, choices=range(1, 721), default=15, help="Set recording time in minutes (default: 2 [min]).", metavar="1-720") parser.add_argument("-4k", "--four_k_resolution", action="store_true", help="Set camera resolution to 4K (3840x2160 px) (default: 1080p).") @@ -293,6 +294,11 
@@ # Set start time of recording and create empty list to save threads start_time = time.monotonic() threads = [] + #Nightlife + latest_images = {} + image_count = {} # Dictionary to keep track of image count for each track_id + webhook_url = "https://nytelyfe-402203.uc.r.appspot.com/upload" # Webhook URL + #Nightlife try: # Record until recording time is finished @@ -331,8 +337,39 @@ q_ctrl.send(ae_ctrl) # Save detections cropped from HQ frame together with metadata - save_crop_metadata(CAM_ID, rec_id, frame_hq, bbox_norm, label, det_conf, track_id, + path_crop = save_crop_metadata(CAM_ID, rec_id, frame_hq, bbox_norm, label, det_conf, track_id, bbox_orig, rec_start_format, save_path, args.crop_bbox) + + #Nightlife + # Update the latest image for this track_id + latest_images[track_id] = path_crop + + # Update image count for this track.id + image_count[track_id] = image_count.get(track_id, 0) + 1 + print(f"Image count for track_id {track_id}: {image_count[track_id]}") + + + if image_count[track_id] == 5: + try: + with open(path_crop, 'rb') as f: + #Open metadata CSV + #with open(f"{save_path}/metadata_{rec_start}.csv", 'rb') as metadata_file: + # Prepare the files to be sent + files = {'file': f} + #'metadata': ('metadata.csv', metadata_file) + + data = { + 'accountID': 'Y7I3Jmp7dCXoank4WXKeTCSoPDp1' # Replace with your actual account ID + } + response = requests.post(webhook_url, files=files, data=data) + + if response.status_code == 200: + print(f"Successfully sent {path_crop} to webhook.") + else: + print(f"Failed to send image to webhook. Status code: {response.status_code}") + except Exception as e: + print(f"An error occurred: {e}") + #Nightlife if args.save_full_frames == "det" and tracklet == tracks[-1]: # Save full HQ frame diff --git a/yolo_tracker_save_hqsync_pijuice.py b/yolo_tracker_save_hqsync_pijuice.py deleted file mode 100644 index 1c0adff..0000000 --- a/yolo_tracker_save_hqsync_pijuice.py +++ /dev/null @@ -1,422 +0,0 @@ -#!/usr/bin/env python3 - -"""Save cropped detections with associated metadata from detection model and object tracker. 
diff --git a/yolo_tracker_save_hqsync_pijuice.py b/yolo_tracker_save_hqsync_pijuice.py
deleted file mode 100644
index 1c0adff..0000000
--- a/yolo_tracker_save_hqsync_pijuice.py
+++ /dev/null
@@ -1,422 +0,0 @@
-#!/usr/bin/env python3
-
-"""Save cropped detections with associated metadata from detection model and object tracker.
-
-Source: https://github.com/maxsitt/insect-detect
-License: GNU GPLv3 (https://choosealicense.com/licenses/gpl-3.0/)
-Author: Maximilian Sittinger (https://github.com/maxsitt)
-Docs: https://maxsitt.github.io/insect-detect-docs/
-
-- write info and error (+ traceback) messages to log file
-- shut down Raspberry Pi without recording if free disk space or current PiJuice
-  battery charge level are lower than the specified thresholds (default: 100 MB and 10%)
-- duration of each recording interval conditional on current PiJuice battery charge level
-  -> increases efficiency of battery usage and can prevent gaps in recordings
-- create directory for each day, recording interval and object class to save images + metadata
-- run a custom YOLO object detection model (.blob format) on-device (Luxonis OAK)
-  -> inference on downscaled + stretched/cropped LQ frames (default: 320x320 px)
-- use an object tracker to track detected objects and assign unique tracking IDs
-  -> accuracy depends on object motion speed and inference speed of the detection model
-- synchronize tracker output (including detections) from inference on LQ frames with
-  HQ frames (default: 1920x1080 px) on-device using the respective message timestamps
-  -> pipeline speed (= inference speed): ~13.4 fps (1080p sync) or ~3.4 fps (4K sync) for full FOV
-                                         ~23 fps (1080x1080) or ~5.8 fps (2160x2160) for reduced FOV
-- save detections (bounding box area) cropped from HQ frames to .jpg at the
-  specified capture frequency (default: 1 s), optionally together with full frames
-- save corresponding metadata from tracker (+ model) output (time, label, confidence,
-  tracking ID, relative bbox coordinates, .jpg file path) to .csv
-- write info about recording interval (rec ID, start/end time, duration, number of cropped
-  detections, unique tracking IDs, free disk space, battery charge level) to 'record_log.csv'
-- shut down Raspberry Pi after recording interval is finished or if charge level or
-  free disk space drop below the specified thresholds or if an error occurs
-- optional arguments:
-  '-4k' crop detections from (+ save HQ frames in) 4K resolution (default: 1080p)
-        -> decreases pipeline speed to ~3.4 fps (1080p: ~13.4 fps)
-  '-fov' default: stretch frames to square for model input ('-fov stretch')
-         -> full FOV is preserved, only aspect ratio is changed (adds distortion)
-         -> HQ frame resolution: 1920x1080 px (default) or 3840x2160 px ('-4k')
-         optional: crop frames to square for model input ('-fov crop')
-         -> FOV is reduced due to cropping of left and right side (no distortion)
-         -> HQ frame resolution: 1080x1080 px (default) or 2160x2160 px ('-4k')
-         -> increases pipeline speed to ~23 fps (4K: ~5.8 fps)
-  '-af' set auto focus range in cm (min distance, max distance)
-        -> e.g. '-af 14 20' to restrict auto focus range to 14-20 cm
-  '-ae' use bounding box coordinates from detections to set auto exposure region
-        -> can improve image quality of crops and thereby classification accuracy
-  '-crop' default: save cropped detections with aspect ratio 1:1 ('-crop square') OR
-          optional: keep original bbox size with variable aspect ratio ('-crop tight')
-          -> '-crop square' increases bbox size on both sides of the minimum dimension,
-             or only on one side if object is localized at frame margin
-          -> can increase classification accuracy by avoiding stretching of the
-             cropped insect image during resizing for classification inference
-  '-full' additionally save full HQ frames to .jpg (e.g. for training data collection)
-          -> '-full det' save full frame together with cropped detections
-             -> slightly decreases pipeline speed
-          -> '-full freq' save full frame at specified frequency (default: 60 s)
-  '-overlay' additionally save full HQ frames with overlays (bbox + info) to .jpg
-             -> slightly decreases pipeline speed
-  '-log' write RPi CPU + OAK chip temperature, RPi available memory (MB) +
-         CPU utilization (%) and battery info to .csv file at specified frequency
-  '-zip' store all captured data in an uncompressed .zip file for each day
-         and delete original directory
-         -> increases file transfer speed from microSD to computer
-            but also on-device processing time and power consumption
-
-based on open source scripts available at https://github.com/luxonis
-"""
-
-import argparse
-import json
-import logging
-import socket
-import subprocess
-import threading
-import time
-from datetime import datetime, timedelta
-from pathlib import Path
-
-import depthai as dai
-import psutil
-from apscheduler.schedulers.background import BackgroundScheduler
-from pijuice import PiJuice
-
-from utils.general import frame_norm, zip_data
-from utils.log import record_log, save_logs
-from utils.oak_cam import bbox_set_exposure_region, set_focus_range
-from utils.save_data import save_crop_metadata, save_full_frame, save_overlay_frame
-
-# Define optional arguments
-parser = argparse.ArgumentParser()
-parser.add_argument("-4k", "--four_k_resolution", action="store_true",
-                    help="Set camera resolution to 4K (3840x2160 px) (default: 1080p).")
-parser.add_argument("-fov", "--adjust_fov", choices=["stretch", "crop"], default="stretch", type=str,
-                    help="Stretch frames to square ('stretch') and preserve full FOV or "
-                         "crop frames to square ('crop') and reduce FOV.")
-parser.add_argument("-af", "--af_range", nargs=2, type=int,
-                    help="Set auto focus range in cm (min distance, max distance).", metavar=("CM_MIN", "CM_MAX"))
-parser.add_argument("-ae", "--bbox_ae_region", action="store_true",
-                    help="Use bounding box coordinates from detections to set auto exposure region.")
-parser.add_argument("-crop", "--crop_bbox", choices=["square", "tight"], default="square", type=str,
-                    help=("Save cropped detections with aspect ratio 1:1 ('square') or "
-                          "keep original bbox size with variable aspect ratio ('tight')."))
-parser.add_argument("-full", "--save_full_frames", choices=["det", "freq"], default=None, type=str,
-                    help="Additionally save full HQ frames to .jpg together with cropped detections ('det') "
-                         "or at specified frequency, independent of detections ('freq').")
-parser.add_argument("-overlay", "--save_overlay_frames", action="store_true",
-                    help="Additionally save full HQ frames with overlays (bbox + info) to .jpg.")
-parser.add_argument("-log", "--save_logs", action="store_true",
-                    help=("Write RPi CPU + OAK chip temperature, RPi available memory (MB) + "
                          "CPU utilization (%%) and battery info to .csv file."))
-parser.add_argument("-zip", "--zip_data", action="store_true",
-                    help="Store data in an uncompressed .zip file for each day and delete original directory.")
-args = parser.parse_args()
-
-# Set file paths to the detection model and corresponding config JSON
-MODEL_PATH = Path("insect-detect/models/yolov5n_320_openvino_2022.1_4shave.blob")
-CONFIG_PATH = Path("insect-detect/models/json/yolov5_v7_320.json")
-
-# Set threshold values required to start and continue a recording
-MIN_DISKSPACE = 100   # minimum free disk space (MB) (default: 100 MB)
-MIN_CHARGELEVEL = 10  # minimum PiJuice battery charge level (default: 10%)
-
-# Set capture frequency (default: 1 second)
-# -> wait for specified amount of seconds between saving cropped detections + metadata
-CAPTURE_FREQ = 1
-
-# Set frequency for saving full frames if "-full freq" is used (default: 60 seconds)
-FULL_FREQ = 60
-
-# Set frequency for saving logs to .csv file if "-log" is used (default: 30 seconds)
-LOG_FREQ = 30
-
-# Set camera ID (default: hostname)
-CAM_ID = socket.gethostname()
-
-# Set logging level and format, write logs to file
-Path("insect-detect/data").mkdir(parents=True, exist_ok=True)
-script_name = Path(__file__).stem
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s: %(message)s",
-                    filename=f"insect-detect/data/{script_name}_log.log", encoding="utf-8")
-logger = logging.getLogger()
-
-# Instantiate PiJuice
-pijuice = PiJuice(1, 0x14)
-
-# Shut down Raspberry Pi if battery charge level or free disk space (MB) are lower than thresholds
-chargelevel_start = pijuice.status.GetChargeLevel().get("data", -1)
-disk_free = round(psutil.disk_usage("/").free / 1048576)
-if chargelevel_start < MIN_CHARGELEVEL or disk_free < MIN_DISKSPACE:
-    logger.info("Shut down without recording | Charge level: %s%%\n", chargelevel_start)
-    subprocess.run(["sudo", "shutdown", "-h", "now"], check=True)
-
-# Set recording time conditional on PiJuice battery charge level
-if chargelevel_start >= 70:
-    REC_TIME = 60 * 40    # PiJuice battery charge level > 70: 40 min
-elif 50 <= chargelevel_start < 70:
-    REC_TIME = 60 * 30    # PiJuice battery charge level 50-70: 30 min
-elif 30 <= chargelevel_start < 50:
-    REC_TIME = 60 * 20    # PiJuice battery charge level 30-50: 20 min
-elif 15 <= chargelevel_start < 30:
-    REC_TIME = 60 * 10    # PiJuice battery charge level 15-30: 10 min
-else:
-    REC_TIME = 60 * 5     # PiJuice battery charge level < 15: 5 min
-
-# Optional: Disable charging of PiJuice battery if charge level is higher than threshold
-# -> can prevent overcharging and extend battery life
-#if chargelevel_start > 80:
-#    pijuice.config.SetChargingConfig({"charging_enabled": False})
-
-# Get last recording ID from text file and increment by 1 (create text file for first recording)
-rec_id_file = Path("insect-detect/data/last_rec_id.txt")
-rec_id = int(rec_id_file.read_text(encoding="utf-8")) + 1 if rec_id_file.exists() else 1
-rec_id_file.write_text(str(rec_id), encoding="utf-8")
-
-# Create directory per day and recording interval to save images + metadata + logs
-rec_start = datetime.now()
-rec_start_format = rec_start.strftime("%Y-%m-%d_%H-%M-%S")
-save_path = Path(f"insect-detect/data/{rec_start.date()}/{rec_start_format}")
-save_path.mkdir(parents=True, exist_ok=True)
-if args.save_full_frames is not None:
-    (save_path / "full").mkdir(parents=True, exist_ok=True)
-if args.save_overlay_frames:
-    (save_path / "overlay").mkdir(parents=True, exist_ok=True)
-
-# Get detection model metadata from config JSON
-with CONFIG_PATH.open(encoding="utf-8") as config_json:
-    config = json.load(config_json)
-nn_config = config.get("nn_config", {})
-nn_metadata = nn_config.get("NN_specific_metadata", {})
-classes = nn_metadata.get("classes", {})
-coordinates = nn_metadata.get("coordinates", {})
-anchors = nn_metadata.get("anchors", {})
-anchor_masks = nn_metadata.get("anchor_masks", {})
-iou_threshold = nn_metadata.get("iou_threshold", {})
-confidence_threshold = nn_metadata.get("confidence_threshold", {})
-nn_mappings = config.get("mappings", {})
-labels = nn_mappings.get("labels", {})
-
-# Create folders for each object class to save cropped detections
-for det_class in labels:
-    (save_path / f"crop/{det_class}").mkdir(parents=True, exist_ok=True)
-
-# Create depthai pipeline
-pipeline = dai.Pipeline()
-
-# Create and configure color camera node
-cam_rgb = pipeline.create(dai.node.ColorCamera)
-#cam_rgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG)  # rotate image 180°
-cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
-if not args.four_k_resolution:
-    cam_rgb.setIspScale(1, 2)  # downscale 4K to 1080p resolution -> HQ frames
-cam_rgb.setPreviewSize(320, 320)  # downscale frames for model input -> LQ frames
-if args.adjust_fov == "stretch":
-    cam_rgb.setPreviewKeepAspectRatio(False)  # stretch frames (16:9) to square (1:1) for model input
-elif args.adjust_fov == "crop" and not args.four_k_resolution:
-    cam_rgb.setVideoSize(1080, 1080)  # crop HQ frames to square
-elif args.adjust_fov == "crop" and args.four_k_resolution:
-    cam_rgb.setVideoSize(2160, 2160)
-cam_rgb.setInterleaved(False)  # planar layout
-cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
-cam_rgb.setFps(25)  # frames per second available for auto focus/exposure and model input
-
-# Get sensor resolution
-SENSOR_RES = cam_rgb.getResolutionSize()
-
-# Create detection network node and define input
-nn = pipeline.create(dai.node.YoloDetectionNetwork)
-cam_rgb.preview.link(nn.input)  # downscaled + stretched LQ frames as model input
-nn.input.setBlocking(False)
-
-# Set detection model specific settings
-nn.setBlobPath(MODEL_PATH)
-nn.setNumClasses(classes)
-nn.setCoordinateSize(coordinates)
-nn.setAnchors(anchors)
-nn.setAnchorMasks(anchor_masks)
-nn.setIouThreshold(iou_threshold)
-nn.setConfidenceThreshold(confidence_threshold)
-nn.setNumInferenceThreads(2)
-
-# Create and configure object tracker node and define inputs
-tracker = pipeline.create(dai.node.ObjectTracker)
-tracker.setTrackerType(dai.TrackerType.ZERO_TERM_IMAGELESS)
-#tracker.setTrackerType(dai.TrackerType.SHORT_TERM_IMAGELESS)  # better for low fps
-tracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.UNIQUE_ID)
-nn.passthrough.link(tracker.inputTrackerFrame)
-nn.passthrough.link(tracker.inputDetectionFrame)
-nn.out.link(tracker.inputDetections)
-
-# Create and configure sync node and define inputs
-sync = pipeline.create(dai.node.Sync)
-sync.setSyncThreshold(timedelta(milliseconds=200))
-cam_rgb.video.link(sync.inputs["frames"])  # HQ frames
-tracker.out.link(sync.inputs["tracker"])  # tracker output
-
-# Create message demux node and define input + outputs
-demux = pipeline.create(dai.node.MessageDemux)
-sync.out.link(demux.input)
-
-xout_rgb = pipeline.create(dai.node.XLinkOut)
-xout_rgb.setStreamName("frame")
-demux.outputs["frames"].link(xout_rgb.input)  # synced HQ frames
-
-xout_tracker = pipeline.create(dai.node.XLinkOut)
-xout_tracker.setStreamName("track")
-demux.outputs["tracker"].link(xout_tracker.input)  # synced tracker output
-
-if args.af_range or args.bbox_ae_region:
-    # Create XLinkIn node to send control commands to color camera node
-    xin_ctrl = pipeline.create(dai.node.XLinkIn)
-    xin_ctrl.setStreamName("control")
-    xin_ctrl.out.link(cam_rgb.inputControl)
-
-# Connect to OAK device and start pipeline in USB2 mode
-with dai.Device(pipeline, maxUsbSpeed=dai.UsbSpeed.HIGH) as device:
-
-    if args.save_logs or (args.save_full_frames == "freq"):
-        logging.getLogger("apscheduler").setLevel(logging.WARNING)
-        scheduler = BackgroundScheduler()
-    else:
-        scheduler = None
-
-    if args.save_logs:
-        # Write RPi + OAK + battery info to .csv file at specified frequency
-        scheduler.add_job(save_logs, "interval", seconds=LOG_FREQ, id="log",
-                          args=[CAM_ID, rec_id, device, rec_start, save_path, pijuice])
-        scheduler.start()
-
-    if args.save_full_frames == "freq":
-        # Save full HQ frame at specified frequency
-        scheduler.add_job(save_full_frame, "interval", seconds=FULL_FREQ, id="full",
-                          args=[None, save_path])
-        if not scheduler.running:
-            scheduler.start()
-
-    # Write info on start of recording to log file
-    logger.info("Cam ID: %s | Rec ID: %s | Rec time: %s min | Charge level: %s%%",
-                CAM_ID, rec_id, int(REC_TIME / 60), chargelevel_start)
-
-    # Create output queues to get the frames and tracklets (+ detections) from the outputs defined above
-    q_frame = device.getOutputQueue(name="frame", maxSize=4, blocking=False)
-    q_track = device.getOutputQueue(name="track", maxSize=4, blocking=False)
-
-    if args.af_range or args.bbox_ae_region:
-        # Create input queue to send control commands to OAK camera
-        q_ctrl = device.getInputQueue(name="control", maxSize=16, blocking=False)
-
-    if args.af_range:
-        # Set auto focus range to specified cm values
-        af_ctrl = set_focus_range(args.af_range[0], args.af_range[1])
-        q_ctrl.send(af_ctrl)
-
-    # Set start time of recording and create empty lists to save charge level and threads
-    start_time = time.monotonic()
-    chargelevel_list = []
-    threads = []
-
-    try:
-        # Record until recording time is finished
-        # Stop recording early if free disk space drops below threshold OR
-        # if charge level dropped below threshold for 10 times
-        while time.monotonic() < start_time + REC_TIME and disk_free > MIN_DISKSPACE and len(chargelevel_list) < 10:
-
-            # Get synchronized HQ frame + tracker output (including passthrough detections)
-            if q_frame.has() and q_track.has():
-                frame_hq = q_frame.get().getCvFrame()
-                tracks = q_track.get().tracklets
-
-                if args.save_full_frames == "freq":
-                    # Save full HQ frame at specified frequency
-                    scheduler.modify_job("full", args=[frame_hq, save_path])
-
-                if args.save_overlay_frames:
-                    # Copy frame for drawing overlays
-                    frame_hq_copy = frame_hq.copy()
-
-                for tracklet in tracks:
-                    # Only use tracklets that are currently tracked (not "NEW", "LOST" or "REMOVED")
-                    if tracklet.status.name == "TRACKED":
-                        # Get bounding box from passthrough detections
-                        bbox_orig = (tracklet.srcImgDetection.xmin, tracklet.srcImgDetection.ymin,
-                                     tracklet.srcImgDetection.xmax, tracklet.srcImgDetection.ymax)
-                        bbox_norm = frame_norm(frame_hq, bbox_orig)
-
-                        # Get metadata from tracker output (including passthrough detections)
-                        label = labels[tracklet.srcImgDetection.label]
-                        det_conf = round(tracklet.srcImgDetection.confidence, 2)
-                        track_id = tracklet.id
-
-                        if args.bbox_ae_region and tracklet == tracks[-1]:
-                            # Use model bbox from latest tracking ID to set auto exposure region
-                            ae_ctrl = bbox_set_exposure_region(bbox_orig, SENSOR_RES)
-                            q_ctrl.send(ae_ctrl)
-
-                        # Save detections cropped from HQ frame together with metadata
-                        save_crop_metadata(CAM_ID, rec_id, frame_hq, bbox_norm, label, det_conf, track_id,
-                                           bbox_orig, rec_start_format, save_path, args.crop_bbox)
-
-                        if args.save_full_frames == "det" and tracklet == tracks[-1]:
-                            # Save full HQ frame
-                            thread_full = threading.Thread(target=save_full_frame,
-                                                           args=(frame_hq, save_path))
-                            thread_full.start()
-                            threads.append(thread_full)
-
-                        if args.save_overlay_frames:
-                            # Save full HQ frame with overlays
-                            thread_overlay = threading.Thread(target=save_overlay_frame,
-                                                              args=(frame_hq_copy, bbox_norm, label,
-                                                                    det_conf, track_id, tracklet, tracks,
-                                                                    save_path, args.four_k_resolution))
-                            thread_overlay.start()
-                            threads.append(thread_overlay)
-
-                # Update free disk space (MB)
-                disk_free = round(psutil.disk_usage("/").free / 1048576)
-
-                # Update charge level (return "99" if not readable, add to list if lower than threshold)
-                chargelevel = pijuice.status.GetChargeLevel().get("data", 99)
-                if chargelevel < MIN_CHARGELEVEL:
-                    chargelevel_list.append(chargelevel)
-
-                # Keep only active threads in list
-                threads = [thread for thread in threads if thread.is_alive()]
-
-                # Wait for specified amount of seconds (default: 1)
-                time.sleep(CAPTURE_FREQ)
-
-        # Write info on end of recording to log file
-        logger.info("Recording %s finished | Charge level: %s%%\n", rec_id, chargelevel)
-
-    except KeyboardInterrupt:
-        # Write info on KeyboardInterrupt (Ctrl+C) to log file
-        logger.info("Recording %s stopped by Ctrl+C | Charge level: %s%%\n", rec_id, chargelevel)
-
-    except Exception:
-        # Write info on error + traceback during recording to log file
-        logger.exception("Error during recording %s | Charge level: %s%%", rec_id, chargelevel)
-
-    finally:
-        # Shut down scheduler (wait until currently executing jobs are finished)
-        if scheduler:
-            scheduler.shutdown()
-
-        # Wait for active threads to finish
-        for thread in threads:
-            thread.join()
-
-        # Write record logs to .csv file
-        rec_end = datetime.now()
-        record_log(CAM_ID, rec_id, rec_start, rec_start_format, rec_end, save_path,
-                   chargelevel_start, chargelevel)
-
-        if args.zip_data:
-            # Store data in uncompressed .zip file and delete original folder
-            zip_data(save_path)
-
-        # (Re-)activate charging of PiJuice battery if charge level is lower than threshold
-        if chargelevel < 80:
-            pijuice.config.SetChargingConfig({"charging_enabled": True})
-
-        # Shut down Raspberry Pi
-        subprocess.run(["sudo", "shutdown", "-h", "now"], check=True)
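
Note: the deleted script above scales the recording duration to the PiJuice charge level read at startup. Restated as a standalone function for reference (a sketch only; the original inlines this as the if/elif chain shown above):

def get_rec_time(chargelevel_start):
    """Return recording time in seconds, scaled to PiJuice battery charge level."""
    if chargelevel_start >= 70:
        return 60 * 40  # charge level > 70: 40 min
    elif 50 <= chargelevel_start < 70:
        return 60 * 30  # charge level 50-70: 30 min
    elif 30 <= chargelevel_start < 50:
        return 60 * 20  # charge level 30-50: 20 min
    elif 15 <= chargelevel_start < 30:
        return 60 * 10  # charge level 15-30: 10 min
    else:
        return 60 * 5   # charge level < 15: 5 min
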
diff --git a/yolo_tracker_save_hqsync_wittypi.py b/yolo_tracker_save_hqsync_wittypi.py
deleted file mode 100644
index 5387f9b..0000000
--- a/yolo_tracker_save_hqsync_wittypi.py
+++ /dev/null
@@ -1,423 +0,0 @@
-#!/usr/bin/env python3
-
-"""Save cropped detections with associated metadata from detection model and object tracker.
-
-Source: https://github.com/maxsitt/insect-detect
-License: GNU GPLv3 (https://choosealicense.com/licenses/gpl-3.0/)
-Author: Maximilian Sittinger (https://github.com/maxsitt)
-Docs: https://maxsitt.github.io/insect-detect-docs/
-
-- write info and error (+ traceback) messages to log file
-- shut down Raspberry Pi without recording if free disk space or current Witty Pi
-  battery charge level are lower than the specified thresholds (default: 100 MB and 20%)
-- duration of each recording interval conditional on current Witty Pi battery charge level
-  -> increases efficiency of battery usage and can prevent gaps in recordings
-- create directory for each day, recording interval and object class to save images + metadata
-- run a custom YOLO object detection model (.blob format) on-device (Luxonis OAK)
-  -> inference on downscaled + stretched/cropped LQ frames (default: 320x320 px)
-- use an object tracker to track detected objects and assign unique tracking IDs
-  -> accuracy depends on object motion speed and inference speed of the detection model
-- synchronize tracker output (including detections) from inference on LQ frames with
-  HQ frames (default: 1920x1080 px) on-device using the respective message timestamps
-  -> pipeline speed (= inference speed): ~13.4 fps (1080p sync) or ~3.4 fps (4K sync) for full FOV
-                                         ~23 fps (1080x1080) or ~5.8 fps (2160x2160) for reduced FOV
-- save detections (bounding box area) cropped from HQ frames to .jpg at the
-  specified capture frequency (default: 1 s), optionally together with full frames
-- save corresponding metadata from tracker (+ model) output (time, label, confidence,
-  tracking ID, relative bbox coordinates, .jpg file path) to .csv
-- write info about recording interval (rec ID, start/end time, duration, number of cropped
-  detections, unique tracking IDs, free disk space, battery charge level) to 'record_log.csv'
-- shut down Raspberry Pi after recording interval is finished or if charge level or
-  free disk space drop below the specified thresholds or if an error occurs
-- optional arguments:
-  '-4k' crop detections from (+ save HQ frames in) 4K resolution (default: 1080p)
-        -> decreases pipeline speed to ~3.4 fps (1080p: ~13.4 fps)
-  '-fov' default: stretch frames to square for model input ('-fov stretch')
-         -> full FOV is preserved, only aspect ratio is changed (adds distortion)
-         -> HQ frame resolution: 1920x1080 px (default) or 3840x2160 px ('-4k')
-         optional: crop frames to square for model input ('-fov crop')
-         -> FOV is reduced due to cropping of left and right side (no distortion)
-         -> HQ frame resolution: 1080x1080 px (default) or 2160x2160 px ('-4k')
-         -> increases pipeline speed to ~23 fps (4K: ~5.8 fps)
-  '-af' set auto focus range in cm (min distance, max distance)
-        -> e.g. '-af 14 20' to restrict auto focus range to 14-20 cm
-  '-ae' use bounding box coordinates from detections to set auto exposure region
-        -> can improve image quality of crops and thereby classification accuracy
-  '-crop' default: save cropped detections with aspect ratio 1:1 ('-crop square') OR
-          optional: keep original bbox size with variable aspect ratio ('-crop tight')
-          -> '-crop square' increases bbox size on both sides of the minimum dimension,
-             or only on one side if object is localized at frame margin
-          -> can increase classification accuracy by avoiding stretching of the
-             cropped insect image during resizing for classification inference
-  '-full' additionally save full HQ frames to .jpg (e.g. for training data collection)
-          -> '-full det' save full frame together with cropped detections
-             -> slightly decreases pipeline speed
-          -> '-full freq' save full frame at specified frequency (default: 60 s)
-  '-overlay' additionally save full HQ frames with overlays (bbox + info) to .jpg
-             -> slightly decreases pipeline speed
-  '-log' write RPi CPU + OAK chip temperature, RPi available memory (MB) +
-         CPU utilization (%) and battery info to .csv file at specified frequency
-  '-zip' store all captured data in an uncompressed .zip file for each day
-         and delete original directory
-         -> increases file transfer speed from microSD to computer
-            but also on-device processing time and power consumption
-
-based on open source scripts available at https://github.com/luxonis
-"""
-
-import argparse
-import json
-import logging
-import signal
-import socket
-import subprocess
-import threading
-import time
-from datetime import datetime, timedelta
-from pathlib import Path
-
-import depthai as dai
-import psutil
-from apscheduler.schedulers.background import BackgroundScheduler
-
-from utils.general import create_signal_handler, frame_norm, zip_data
-from utils.log import record_log, save_logs
-from utils.oak_cam import bbox_set_exposure_region, set_focus_range
-from utils.save_data import save_crop_metadata, save_full_frame, save_overlay_frame
-from utils.wittypi import WittyPiStatus
-
-# Define optional arguments
-parser = argparse.ArgumentParser()
-parser.add_argument("-4k", "--four_k_resolution", action="store_true",
-                    help="Set camera resolution to 4K (3840x2160 px) (default: 1080p).")
-parser.add_argument("-fov", "--adjust_fov", choices=["stretch", "crop"], default="stretch", type=str,
-                    help="Stretch frames to square ('stretch') and preserve full FOV or "
-                         "crop frames to square ('crop') and reduce FOV.")
-parser.add_argument("-af", "--af_range", nargs=2, type=int,
-                    help="Set auto focus range in cm (min distance, max distance).", metavar=("CM_MIN", "CM_MAX"))
-parser.add_argument("-ae", "--bbox_ae_region", action="store_true",
-                    help="Use bounding box coordinates from detections to set auto exposure region.")
-parser.add_argument("-crop", "--crop_bbox", choices=["square", "tight"], default="square", type=str,
-                    help=("Save cropped detections with aspect ratio 1:1 ('square') or "
-                          "keep original bbox size with variable aspect ratio ('tight')."))
-parser.add_argument("-full", "--save_full_frames", choices=["det", "freq"], default=None, type=str,
-                    help="Additionally save full HQ frames to .jpg together with cropped detections ('det') "
-                         "or at specified frequency, independent of detections ('freq').")
-parser.add_argument("-overlay", "--save_overlay_frames", action="store_true",
-                    help="Additionally save full HQ frames with overlays (bbox + info) to .jpg.")
-parser.add_argument("-log", "--save_logs", action="store_true",
-                    help=("Write RPi CPU + OAK chip temperature, RPi available memory (MB) + "
-                          "CPU utilization (%%) and battery info to .csv file."))
-parser.add_argument("-zip", "--zip_data", action="store_true",
-                    help="Store data in an uncompressed .zip file for each day and delete original directory.")
-args = parser.parse_args()
-
-# Set file paths to the detection model and corresponding config JSON
-MODEL_PATH = Path("insect-detect/models/yolov5n_320_openvino_2022.1_4shave.blob")
-CONFIG_PATH = Path("insect-detect/models/json/yolov5_v7_320.json")
-
-# Set threshold values required to start and continue a recording
-MIN_DISKSPACE = 100   # minimum free disk space (MB) (default: 100 MB)
-MIN_CHARGELEVEL = 20  # minimum Witty Pi battery charge level (default: 20%)
-
-# Set capture frequency (default: 1 second)
-# -> wait for specified amount of seconds between saving cropped detections + metadata
-CAPTURE_FREQ = 1
-
-# Set frequency for saving full frames if "-full freq" is used (default: 60 seconds)
-FULL_FREQ = 60
-
-# Set frequency for saving logs to .csv file if "-log" is used (default: 30 seconds)
-LOG_FREQ = 30
-
-# Set camera ID (default: hostname)
-CAM_ID = socket.gethostname()
-
-# Set logging level and format, write logs to file
-Path("insect-detect/data").mkdir(parents=True, exist_ok=True)
-script_name = Path(__file__).stem
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s: %(message)s",
-                    filename=f"insect-detect/data/{script_name}_log.log", encoding="utf-8")
-logger = logging.getLogger()
-
-# Handle SIGTERM signal (e.g. from external shutdown trigger)
-external_shutdown = threading.Event()
-signal.signal(signal.SIGTERM, create_signal_handler(external_shutdown))
-
-# Instantiate Witty Pi 4 L3V7
-wittypi = WittyPiStatus()
-
-# Shut down Raspberry Pi if battery charge level or free disk space (MB) are lower than thresholds
-chargelevel_start = wittypi.estimate_chargelevel()
-disk_free = round(psutil.disk_usage("/").free / 1048576)
-if (chargelevel_start != "USB_C_IN" and chargelevel_start < MIN_CHARGELEVEL) or disk_free < MIN_DISKSPACE:
-    logger.info("Shut down without recording | Charge level: %s%%\n", chargelevel_start)
-    subprocess.run(["sudo", "shutdown", "-h", "now"], check=True)
-
-# Set recording time conditional on Witty Pi battery charge level
-if chargelevel_start == "USB_C_IN":
-    REC_TIME = 60 * 40    # Power from main battery (USB C): 40 min
-elif chargelevel_start >= 70:
-    REC_TIME = 60 * 30    # Witty Pi battery charge level > 70: 30 min
-elif 50 <= chargelevel_start < 70:
-    REC_TIME = 60 * 20    # Witty Pi battery charge level 50-70: 20 min
-elif 30 <= chargelevel_start < 50:
-    REC_TIME = 60 * 10    # Witty Pi battery charge level 30-50: 10 min
-else:
-    REC_TIME = 60 * 5     # Witty Pi battery charge level < 30: 5 min
-
-# Get last recording ID from text file and increment by 1 (create text file for first recording)
-rec_id_file = Path("insect-detect/data/last_rec_id.txt")
-rec_id = int(rec_id_file.read_text(encoding="utf-8")) + 1 if rec_id_file.exists() else 1
-rec_id_file.write_text(str(rec_id), encoding="utf-8")
-
-# Create directory per day and recording interval to save images + metadata + logs
-rec_start = datetime.now()
-rec_start_format = rec_start.strftime("%Y-%m-%d_%H-%M-%S")
-save_path = Path(f"insect-detect/data/{rec_start.date()}/{rec_start_format}")
-save_path.mkdir(parents=True, exist_ok=True)
-if args.save_full_frames is not None:
-    (save_path / "full").mkdir(parents=True, exist_ok=True)
-if args.save_overlay_frames:
-    (save_path / "overlay").mkdir(parents=True, exist_ok=True)
-
-# Get detection model metadata from config JSON
-with CONFIG_PATH.open(encoding="utf-8") as config_json:
-    config = json.load(config_json)
-nn_config = config.get("nn_config", {})
-nn_metadata = nn_config.get("NN_specific_metadata", {})
-classes = nn_metadata.get("classes", {})
-coordinates = nn_metadata.get("coordinates", {})
-anchors = nn_metadata.get("anchors", {})
-anchor_masks = nn_metadata.get("anchor_masks", {})
-iou_threshold = nn_metadata.get("iou_threshold", {})
-confidence_threshold = nn_metadata.get("confidence_threshold", {})
-nn_mappings = config.get("mappings", {})
-labels = nn_mappings.get("labels", {})
-
-# Create folders for each object class to save cropped detections
-for det_class in labels:
-    (save_path / f"crop/{det_class}").mkdir(parents=True, exist_ok=True)
-
-# Create depthai pipeline
-pipeline = dai.Pipeline()
-
-# Create and configure color camera node
-cam_rgb = pipeline.create(dai.node.ColorCamera)
-#cam_rgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG)  # rotate image 180°
-cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
-if not args.four_k_resolution:
-    cam_rgb.setIspScale(1, 2)  # downscale 4K to 1080p resolution -> HQ frames
-cam_rgb.setPreviewSize(320, 320)  # downscale frames for model input -> LQ frames
-if args.adjust_fov == "stretch":
-    cam_rgb.setPreviewKeepAspectRatio(False)  # stretch frames (16:9) to square (1:1) for model input
-elif args.adjust_fov == "crop" and not args.four_k_resolution:
-    cam_rgb.setVideoSize(1080, 1080)  # crop HQ frames to square
-elif args.adjust_fov == "crop" and args.four_k_resolution:
-    cam_rgb.setVideoSize(2160, 2160)
-cam_rgb.setInterleaved(False)  # planar layout
-cam_rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
-cam_rgb.setFps(25)  # frames per second available for auto focus/exposure and model input
-
-# Get sensor resolution
-SENSOR_RES = cam_rgb.getResolutionSize()
-
-# Create detection network node and define input
-nn = pipeline.create(dai.node.YoloDetectionNetwork)
-cam_rgb.preview.link(nn.input)  # downscaled + stretched LQ frames as model input
-nn.input.setBlocking(False)
-
-# Set detection model specific settings
-nn.setBlobPath(MODEL_PATH)
-nn.setNumClasses(classes)
-nn.setCoordinateSize(coordinates)
-nn.setAnchors(anchors)
-nn.setAnchorMasks(anchor_masks)
-nn.setIouThreshold(iou_threshold)
-nn.setConfidenceThreshold(confidence_threshold)
-nn.setNumInferenceThreads(2)
-
-# Create and configure object tracker node and define inputs
-tracker = pipeline.create(dai.node.ObjectTracker)
-tracker.setTrackerType(dai.TrackerType.ZERO_TERM_IMAGELESS)
-#tracker.setTrackerType(dai.TrackerType.SHORT_TERM_IMAGELESS)  # better for low fps
-tracker.setTrackerIdAssignmentPolicy(dai.TrackerIdAssignmentPolicy.UNIQUE_ID)
-nn.passthrough.link(tracker.inputTrackerFrame)
-nn.passthrough.link(tracker.inputDetectionFrame)
-nn.out.link(tracker.inputDetections)
-
-# Create and configure sync node and define inputs
-sync = pipeline.create(dai.node.Sync)
-sync.setSyncThreshold(timedelta(milliseconds=200))
-cam_rgb.video.link(sync.inputs["frames"])  # HQ frames
-tracker.out.link(sync.inputs["tracker"])  # tracker output
-
-# Create message demux node and define input + outputs
-demux = pipeline.create(dai.node.MessageDemux)
-sync.out.link(demux.input)
-
-xout_rgb = pipeline.create(dai.node.XLinkOut)
-xout_rgb.setStreamName("frame")
-demux.outputs["frames"].link(xout_rgb.input)  # synced HQ frames
-
-xout_tracker = pipeline.create(dai.node.XLinkOut)
-xout_tracker.setStreamName("track")
-demux.outputs["tracker"].link(xout_tracker.input)  # synced tracker output
-
-if args.af_range or args.bbox_ae_region:
-    # Create XLinkIn node to send control commands to color camera node
-    xin_ctrl = pipeline.create(dai.node.XLinkIn)
-    xin_ctrl.setStreamName("control")
-    xin_ctrl.out.link(cam_rgb.inputControl)
-
-# Connect to OAK device and start pipeline in USB2 mode
-with dai.Device(pipeline, maxUsbSpeed=dai.UsbSpeed.HIGH) as device:
-
-    if args.save_logs or (args.save_full_frames == "freq"):
-        logging.getLogger("apscheduler").setLevel(logging.WARNING)
-        scheduler = BackgroundScheduler()
-    else:
-        scheduler = None
-
-    if args.save_logs:
-        # Write RPi + OAK + battery info to .csv file at specified frequency
-        scheduler.add_job(save_logs, "interval", seconds=LOG_FREQ, id="log",
-                          args=[CAM_ID, rec_id, device, rec_start, save_path, wittypi])
-        scheduler.start()
-
-    if args.save_full_frames == "freq":
-        # Save full HQ frame at specified frequency
-        scheduler.add_job(save_full_frame, "interval", seconds=FULL_FREQ, id="full",
-                          args=[None, save_path])
-        if not scheduler.running:
-            scheduler.start()
-
-    # Write info on start of recording to log file
-    logger.info("Cam ID: %s | Rec ID: %s | Rec time: %s min | Charge level: %s%%",
-                CAM_ID, rec_id, int(REC_TIME / 60), chargelevel_start)
-
-    # Create output queues to get the frames and tracklets (+ detections) from the outputs defined above
-    q_frame = device.getOutputQueue(name="frame", maxSize=4, blocking=False)
-    q_track = device.getOutputQueue(name="track", maxSize=4, blocking=False)
-
-    if args.af_range or args.bbox_ae_region:
-        # Create input queue to send control commands to OAK camera
-        q_ctrl = device.getInputQueue(name="control", maxSize=16, blocking=False)
-
-    if args.af_range:
-        # Set auto focus range to specified cm values
-        af_ctrl = set_focus_range(args.af_range[0], args.af_range[1])
-        q_ctrl.send(af_ctrl)
-
-    # Set start time of recording and create empty lists to save charge level and threads
-    start_time = time.monotonic()
-    chargelevel_list = []
-    threads = []
-
-    try:
-        # Record until recording time is finished
-        # Stop recording early if free disk space drops below threshold OR
-        # if charge level dropped below threshold for 10 times
-        while time.monotonic() < start_time + REC_TIME and disk_free > MIN_DISKSPACE and len(chargelevel_list) < 10:
-
-            # Get synchronized HQ frame + tracker output (including passthrough detections)
-            if q_frame.has() and q_track.has():
-                frame_hq = q_frame.get().getCvFrame()
-                tracks = q_track.get().tracklets
-
-                if args.save_full_frames == "freq":
-                    # Save full HQ frame at specified frequency
-                    scheduler.modify_job("full", args=[frame_hq, save_path])
-
-                if args.save_overlay_frames:
-                    # Copy frame for drawing overlays
-                    frame_hq_copy = frame_hq.copy()
-
-                for tracklet in tracks:
-                    # Only use tracklets that are currently tracked (not "NEW", "LOST" or "REMOVED")
-                    if tracklet.status.name == "TRACKED":
-                        # Get bounding box from passthrough detections
-                        bbox_orig = (tracklet.srcImgDetection.xmin, tracklet.srcImgDetection.ymin,
-                                     tracklet.srcImgDetection.xmax, tracklet.srcImgDetection.ymax)
-                        bbox_norm = frame_norm(frame_hq, bbox_orig)
-
-                        # Get metadata from tracker output (including passthrough detections)
-                        label = labels[tracklet.srcImgDetection.label]
-                        det_conf = round(tracklet.srcImgDetection.confidence, 2)
-                        track_id = tracklet.id
-
-                        if args.bbox_ae_region and tracklet == tracks[-1]:
-                            # Use model bbox from latest tracking ID to set auto exposure region
-                            ae_ctrl = bbox_set_exposure_region(bbox_orig, SENSOR_RES)
-                            q_ctrl.send(ae_ctrl)
-
-                        # Save detections cropped from HQ frame together with metadata
-                        save_crop_metadata(CAM_ID, rec_id, frame_hq, bbox_norm, label, det_conf, track_id,
-                                           bbox_orig, rec_start_format, save_path, args.crop_bbox)
-
-                        if args.save_full_frames == "det" and tracklet == tracks[-1]:
-                            # Save full HQ frame
-                            thread_full = threading.Thread(target=save_full_frame,
-                                                           args=(frame_hq, save_path))
-                            thread_full.start()
-                            threads.append(thread_full)
-
-                        if args.save_overlay_frames:
-                            # Save full HQ frame with overlays
-                            thread_overlay = threading.Thread(target=save_overlay_frame,
-                                                              args=(frame_hq_copy, bbox_norm, label,
-                                                                    det_conf, track_id, tracklet, tracks,
-                                                                    save_path, args.four_k_resolution))
-                            thread_overlay.start()
-                            threads.append(thread_overlay)
-
-                # Update free disk space (MB)
-                disk_free = round(psutil.disk_usage("/").free / 1048576)
-
-                # Update charge level (add to list if lower than threshold)
-                chargelevel = wittypi.estimate_chargelevel()
-                if chargelevel != "USB_C_IN" and chargelevel < MIN_CHARGELEVEL:
-                    chargelevel_list.append(chargelevel)
-
-                # Keep only active threads in list
-                threads = [thread for thread in threads if thread.is_alive()]
-
-                # Wait for specified amount of seconds (default: 1)
-                time.sleep(CAPTURE_FREQ)
-
-        # Write info on end of recording to log file
-        logger.info("Recording %s finished | Charge level: %s%%\n", rec_id, chargelevel)
-
-    except SystemExit:
-        # Write info on external shutdown trigger (e.g. button) to log file
-        logger.info("Recording %s stopped by external trigger | Charge level: %s%%\n", rec_id, chargelevel)
-
-    except KeyboardInterrupt:
-        # Write info on KeyboardInterrupt (Ctrl+C) to log file
-        logger.info("Recording %s stopped by Ctrl+C | Charge level: %s%%\n", rec_id, chargelevel)
-
-    except Exception:
-        # Write info on error + traceback during recording to log file
-        logger.exception("Error during recording %s | Charge level: %s%%", rec_id, chargelevel)
-
-    finally:
-        # Shut down scheduler (wait until currently executing jobs are finished)
-        if scheduler:
-            scheduler.shutdown()
-
-        # Wait for active threads to finish
-        for thread in threads:
-            thread.join()
-
-        # Write record logs to .csv file
-        rec_end = datetime.now()
-        record_log(CAM_ID, rec_id, rec_start, rec_start_format, rec_end, save_path,
-                   chargelevel_start, chargelevel)
-
-        if args.zip_data:
-            # Store data in uncompressed .zip file and delete original folder
-            zip_data(save_path)
-
-        if not external_shutdown.is_set():
-            # Shut down Raspberry Pi
-            subprocess.run(["sudo", "shutdown", "-h", "now"], check=True)
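
Note: the Witty Pi variant deleted above differs from the PiJuice variant mainly in its power management: it reads the charge level via WittyPiStatus().estimate_chargelevel() (which can also return "USB_C_IN" when powered over USB C) and installs a SIGTERM handler so an external shutdown trigger unwinds cleanly through the script's "except SystemExit" branch. create_signal_handler() lives in utils.general, which is not part of this patch; a minimal implementation consistent with how the script uses it might look like this (an assumption, not the actual utility):

import signal
import threading

def create_signal_handler(shutdown_event):
    """Return a SIGTERM handler that records an external shutdown request."""
    def signal_handler(signum, frame):
        shutdown_event.set()  # checked in the finally block to skip the self-initiated shutdown
        raise SystemExit      # unwinds into the script's 'except SystemExit' branch
    return signal_handler

external_shutdown = threading.Event()
signal.signal(signal.SIGTERM, create_signal_handler(external_shutdown))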