From 41f826ad5984bfc3fbad6639786c221d7d57de97 Mon Sep 17 00:00:00 2001
From: huanxiaoling <3174348550@qq.com>
Date: Sat, 29 Oct 2022 17:23:07 +0800
Subject: [PATCH] update en file and images in tutorials

---
 .../source_en/cv/images/output_6_1.png        | Bin 0 -> 28576 bytes
 .../application/source_en/cv/resnet50.md      | 456 ++++++++++++------
 .../source_zh_cn/cv/resnet50.ipynb            |   2 +-
 3 files changed, 317 insertions(+), 141 deletions(-)
 create mode 100644 tutorials/application/source_en/cv/images/output_6_1.png

diff --git a/tutorials/application/source_en/cv/images/output_6_1.png b/tutorials/application/source_en/cv/images/output_6_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..d33371ef83081fc45775f8e2a331a5120d59099f
GIT binary patch
literal 28576
[28576 bytes of base85-encoded PNG data omitted: binary content of the new output_6_1.png visualization image, no readable text]
z=qJ*T@=yJDS7jPRNz~wxWNyGP3(h1%pxWOjxCYKQtXt(j?}LJuv8W1Wwm&Xd-f}CK zB7Rymd+yIwdv};@!ACU_z4(X`Yc86DN!}6=MeRPEZg|hqYG)#u|I7 zK~oQKAX;Z3U27(lb($e1ZuA~7g!PGkL77j}B;`FL`mIg5^&f6K?&H`GJ3p|1SXF@C zGNoN{X3{Yvno(KMmas}_7Q0*1gFN4{2Z6#&w*kUA3YbRewXM#<$r^< z|I%fQ+^T(B)Ifqz?7NYOc!AEK$n<_dyRt~RsynkOmAO1e;6Al7f_Et-|1<4OiX$9l zjdr=3dE^egT=-%zm767mOxK9E%o; z8;Ra4=6L^;bHUWZ^_9`QXoPwG{F2IKsU8pi4gY#p;2mp4^-2&mJ>@c}<93rIfmrlG z$iZ0dFyg*xeN6Ox`g8YjT25hOuyPOs3C7gzAYWDWJTc82_feGU$P2-hV$ue-1HAef zQ)Fqr_=Ssvmyk4`NG5pE$vEXG?%s{0?-R9>RKXA)TOXgIe0X2O!Lh^F(idiQ+!j6$ zTm9ubzE!kZqqGaG&;lD?*Lni9I60GkGUdndvWOx2jKF%RT<~bqz7=~MO++#>+7yk$BK8Q zAU|trt$rCtr&WU8CmNR5bw@YD66a2d-x=a=I>2qM3kQo@N5K{7K z)z;J|wm&%HMY`1z?WLH?FR9ET4!`Ff0b^qe2 zvavNDlygiSn&cyXdxZd(wB`gk-jhOv=*gkxuJ48U!&(3fWx zc&zm*EI*=W&4fLb_j!e2e#@Ys)`h|DL+ZO@hLO zU=~oct4^g7#K5(|`}k)iO%+DyO$iL{HQ?fy;AZN`LpWWihQ`$unA!S{ygeTO6cBkO zGjHPTNx+} zh)u|Wxc+2PD?*M;&uW0u9VnJdRQzNM+8L5WFT^_SbvsJAko4wrJ_NQT+H~?N22sOY z)`<6b3;RY>-_?1{9mL$)zza1+KSV1KPE}jv<3qt%VA(Y4tW%=ZV7@I6P;2OqD|qv z3f%*9sKwe1ILDyT8knf?x$+*aycC8|PrNRaJ%QeomF!3Ds1%>dh|FpVo=U6%X#TMv zK3vqnc))M{7)Y8dOa&ix;XGzf7Bp{gQ~(9*`8pe_yx1#9))vyU{nvA+en0E~?y9`i zFvG_x)xEv(TIks6>4IJ|!Gr?wi7K~rzzvR9dpqfBgNcx$hB3Zer^CVdQei`J;M#`d zOzp78y4-c5SDX?*Fmba1UL{;y>yyd&()_I42{@QQN=nKT9xFiWvQUznTu!H~+I-?C z9YUfvr5Ds~rVQex-}1}2-Q~jqthnJ7VW2(C%aW|O?PpNF9~$^#%brY~MS;27)*Q1T z9_e1WAq7de#K$R~Kl%*yp z|3*0XFC@Zg>JJ?fcgUiq%7ak#iKl#?|B}UF+(Ls+Ci11|22^aD>Gn0*D&R8m3mW3y zRGMfsl6N34oQebr@E031cJr~H)<u__`}R7Ex|XAxz8V7BH%7tV7CoKIXo(Jp?O}Kt`p3UCo z=@%|3-ivi(>g&V9-Al3ex>~Tv%Pr%^=&8gyW+3+fF4QtglHg5~T(3VE@b$#~MfCA| zqcN;rGwrWy5(7;5rN80#S34i^9R|OO>`Sx{%383@7VAIAjg9hEEGk!KIdwL=O}Zsm z_V6oWMmAoc_8` zN7|O%z`=mFW9zA~eHn=Cg!5LmJFiX}DxXF6mxtr6ctx-GC-G7~E)Kh7o}w0^z+nNi z=D!r39d=j#^CaS*XT4c4(Ee6OFAd%Je=7?9iJSgSPyEln`DdH)>w2csv?Xxj;qSBf P{?Js_Q>j$?>-B#Dv2ivm literal 0 HcmV?d00001 diff --git a/tutorials/application/source_en/cv/resnet50.md b/tutorials/application/source_en/cv/resnet50.md index 35ec456098..094f1386aa 100644 --- a/tutorials/application/source_en/cv/resnet50.md +++ b/tutorials/application/source_en/cv/resnet50.md @@ -18,71 +18,155 @@ In ResNet, a residual network is proposed to alleviate the degradation problem, ## Preparing and Loading Datasets -[The CIFAR-10 dataset](http://www.cs.toronto.edu/~kriz/cifar.html) contains 60,000 32 x 32 color images in 10 classes, with 6,000 images per class. There are 50,000 training images and 10,000 test images. The following example uses the `mindvision.classification.dataset.Cifar10` API to download and load the CIFAR-10 dataset. +[The CIFAR-10 dataset](http://www.cs.toronto.edu/~kriz/cifar.html) contains 60,000 32 x 32 color images in 10 classes, with 6,000 images per class. There are 50,000 training images and 10,000 test images. First, the following example uses the `download` interface to download and decompress the CIFAR-10 file, which currently only supports parsing the binary version (CIFAR-10 binary version). ```python -from mindvision.classification.dataset import Cifar10 - -# Dataset root directory -data_dir = "./datasets" - -# Download, decompress, and load the CIFAR-10 training dataset. -dataset_train = Cifar10(path=data_dir, split='train', batch_size=6, resize=32, download=True) -ds_train = dataset_train.run() -step_size = ds_train.get_dataset_size() -# Download, decompress, and load the CIFAR-10 test dataset. 
-dataset_val = Cifar10(path=data_dir, split='test', batch_size=6, resize=32, download=True)
-ds_val = dataset_val.run()
+from download import download
+
+url = "http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz"
+
+download(url, "./datasets-cifar10-bin", kind="tar.gz")
+```
+
+```tex
+Creating data folder...
+Downloading data from http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz (162.2 MB)
+
+file_sizes: 100%|████████████████████████████| 170M/170M [00:26<00:00, 6.38MB/s]
+Extracting tar.gz file...
+Successfully downloaded / unzipped to ./datasets-cifar10-bin
+```
 
 The directory structure of the CIFAR-10 dataset file is as follows:
 
 ```Text
-datasets/
-├── cifar-10-batches-py
-│   ├── batches.meta
-│   ├── data_batch_1
-│   ├── data_batch_2
-│   ├── data_batch_3
-│   ├── data_batch_4
-│   ├── data_batch_5
-│   ├── readme.html
-│   └── test_batch
-└── cifar-10-python.tar.gz
+datasets-cifar10-bin/cifar-10-batches-bin
+├── batches.meta.txt
+├── data_batch_1.bin
+├── data_batch_2.bin
+├── data_batch_3.bin
+├── data_batch_4.bin
+├── data_batch_5.bin
+├── readme.html
+└── test_batch.bin
 ```
 
-Visualize the CIFAR-10 training dataset.
+Then, the `mindspore.dataset.Cifar10Dataset` interface is used to load the dataset and perform the associated image transforms.
 
 ```python
+import mindspore.dataset as ds
+import mindspore.dataset.vision as vision
+import mindspore as ms
 import numpy as np
+
+from mindspore import nn, ops
+
+
+data_dir = "./datasets-cifar10-bin/cifar-10-batches-bin"  # Dataset root directory
+batch_size = 6  # Batch size
+image_size = 32  # Training image size
+workers = 4  # Number of parallel workers
+num_classes = 10  # Number of classes
+
+def create_dataset_cifar10(dataset_dir, usage, resize, batch_size, workers):
+
+    data_set = ds.Cifar10Dataset(dataset_dir=dataset_dir,
+                                 usage=usage,
+                                 num_parallel_workers=workers,
+                                 shuffle=True)
+
+    trans = []
+    if usage == "train":
+        trans += [
+            vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            vision.RandomHorizontalFlip(prob=0.5)
+        ]
+
+    trans += [
+        vision.Resize(resize),
+        vision.Rescale(1.0 / 255.0, 0.0),
+        vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+        vision.HWC2CHW()
+    ]
+
+    target_trans = [(lambda x: np.array([x]).astype(np.int32)[0])]
+
+    # Data mapping operation
+    data_set = data_set.map(
+        operations=trans,
+        input_columns='image',
+        num_parallel_workers=workers)
+
+    data_set = data_set.map(
+        operations=target_trans,
+        input_columns='label',
+        num_parallel_workers=workers)
+
+    # Batch operation
+    data_set = data_set.batch(batch_size)
+
+    return data_set
+
+
+# Obtain the processed training and test datasets
+dataset_train = create_dataset_cifar10(dataset_dir=data_dir,
+                                       usage="train",
+                                       resize=image_size,
+                                       batch_size=batch_size,
+                                       workers=workers)
+step_size_train = dataset_train.get_dataset_size()
+index_label_dict = dataset_train.get_class_indexing()
+
+dataset_val = create_dataset_cifar10(dataset_dir=data_dir,
+                                     usage="test",
+                                     resize=image_size,
+                                     batch_size=batch_size,
+                                     workers=workers)
+step_size_val = dataset_val.get_dataset_size()
+```
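+
+Each `data_batch_*.bin` file simply concatenates fixed-size records; in the standard CIFAR-10 binary layout, a record is 3073 bytes: one label byte followed by 3072 pixel bytes (3 channels x 32 x 32, channel-first). The short sketch below is an addition to this tutorial (reusing `np` and `data_dir` from above) and decodes the first record directly as a sanity check:
+
+```python
+# Read the first 3073-byte record: 1 label byte + 3072 pixel bytes (CHW order).
+record = np.fromfile(data_dir + "/data_batch_1.bin", dtype=np.uint8, count=3073)
+label, pixels = int(record[0]), record[1:].reshape(3, 32, 32)
+print(label, pixels.shape)  # A class index in [0, 9] and the shape (3, 32, 32)
+```
+
+Visualize the CIFAR-10 training dataset.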
+ +```python import matplotlib.pyplot as plt +import numpy as np -data = next(ds_train.create_dict_iterator()) +data_iter = next(dataset_train.create_dict_iterator()) -images = data["image"].asnumpy() -labels = data["label"].asnumpy() +images = data_iter["image"].asnumpy() +labels = data_iter["label"].asnumpy() print(f"Image shape: {images.shape}, Label: {labels}") +classes = [] + +with open(data_dir+"/batches.meta.txt", "r") as f: + for line in f: + line = line.rstrip() + if line != '': + classes.append(line) + plt.figure() -for i in range(1, 7): - plt.subplot(2, 3, i) - image_trans = np.transpose(images[i - 1], (1, 2, 0)) +for i in range(6): + plt.subplot(2, 3, i+1) + image_trans = np.transpose(images[i], (1, 2, 0)) mean = np.array([0.4914, 0.4822, 0.4465]) std = np.array([0.2023, 0.1994, 0.2010]) image_trans = std * image_trans + mean image_trans = np.clip(image_trans, 0, 1) - plt.title(f"{dataset_train.index2label[labels[i - 1]]}") + plt.title(f"{classes[labels[i]]}") plt.imshow(image_trans) plt.axis("off") plt.show() ``` -```python - Image shape: (6, 3, 32, 32), Label: [6 4 4 5 2 1] +```text +Image shape: (6, 3, 32, 32), Label: [5 8 0 3 0 9] ``` -![png](images/output_3_1.png) +![](images/output_6_1.png) ## Building a Network @@ -100,22 +184,24 @@ There are two residual network structures. One is the building block, which is a The following figure shows the structure of the building block. The main body has two convolutional layers. -+ On the first-layer network of the main body. 64 input channels are used. Then, 64 output channels are obtained through the $3\times3$ convolutional layer, the Batch Normalization layer, and the ReLU activation function layer. ++ On the first-layer network of the main body, 64 input channels are used. Then, 64 output channels are obtained through the $3\times3$ convolutional layer, the Batch Normalization layer, and the ReLU activation function layer. + On the second-layer network of the main body, 64 input channels are also used. Then, 64 output channels are obtained through the $3\times3$ convolutional layer, the Batch Normalization layer, and the ReLU activation function layer. Finally, the feature matrix output by the main body is added to the feature matrix output by the shortcut. After the ReLU activation function is used, the final output of the building block is obtained. ![building-block-5](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/tutorials/application/source_zh_cn/cv/images/resnet_5.png) -When adding the feature matrix output by the main body to that output by the shortcut, ensure that the shape of the feature matrix output by the main body is the same as that of the feature matrix output by the shortcut. If the shapes are different, for example, when the number of output channels is twice that of input channels, the number of convolution kernels used by the shortcut for convolution operations is the same as that of the output channels and the size is $1\times1$. If the size of the output image is half of that of the input image, `stride` in the convolution operation of the shortcut must be set to **2**, and `stride` in the first-layer convolution operation of the main body must also be set to **2**. +When adding the feature matrix output by the main body to that output by the shortcut, ensure that the shape of the feature matrix output by the main body is the same as that of the feature matrix output by the shortcut. 
If the shapes are different, for example, when the number of output channels is twice that of input channels, the number of convolution kernels used by the shortcut for convolution operations is the same as that of the output channels and the size is $1\times1$. If the size of the output image is half of that of the input image, `stride` in the convolution operation of the shortcut must be set to 2, and `stride` in the first-layer convolution operation of the main body must also be set to 2.
 
-The following code defines the `ResidualBlockBase` class to implement the building block structure:
+The following code defines the `ResidualBlockBase` class to implement the building block structure.
 
 ```python
 from typing import Type, Union, List, Optional
-from mindvision.classification.models.blocks import ConvNormActivation
 from mindspore import nn
+from mindspore.common.initializer import Normal
 
+weight_init = Normal(mean=0, sigma=0.02)
+gamma_init = Normal(mean=1, sigma=0.02)
 
 class ResidualBlockBase(nn.Cell):
     expansion: int = 1  # The number of convolution kernels at the last layer is the same as that of convolution kernels at the first layer.
 
@@ -125,12 +211,15 @@ class ResidualBlockBase(nn.Cell):
     def __init__(self, in_channel: int, out_channel: int,
                  stride: int = 1, norm: Optional[nn.Cell] = None,
                  down_sample: Optional[nn.Cell] = None) -> None:
         super(ResidualBlockBase, self).__init__()
         if not norm:
-            norm = nn.BatchNorm2d
-
-        self.conv1 = ConvNormActivation(in_channel, out_channel,
-                                        kernel_size=3, stride=stride, norm=norm)
-        self.conv2 = ConvNormActivation(out_channel, out_channel,
-                                        kernel_size=3, norm=norm, activation=None)
+            self.norm = nn.BatchNorm2d(out_channel)
+        else:
+            self.norm = norm
+
+        self.conv1 = nn.Conv2d(in_channel, out_channel,
+                               kernel_size=3, stride=stride,
+                               weight_init=weight_init)
+        self.conv2 = nn.Conv2d(out_channel, out_channel,
+                               kernel_size=3, weight_init=weight_init)
         self.relu = nn.ReLU()
         self.down_sample = down_sample
 
@@ -139,11 +228,14 @@ class ResidualBlockBase(nn.Cell):
     def construct(self, x):
         identity = x  # shortcut
 
         out = self.conv1(x)  # First layer of the main body: 3 x 3 convolutional layer
+        out = self.norm(out)
+        out = self.relu(out)
         out = self.conv2(out)  # Second layer of the main body: 3 x 3 convolutional layer
+        out = self.norm(out)
 
-        if self.down_sample:
+        if self.down_sample is not None:
             identity = self.down_sample(x)
         out += identity  # The output is the sum of the main body and the shortcut.
         out = self.relu(out)
 
         return out
 ```
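+
+As a quick sanity check (an addition to this tutorial, reusing `ms` and `np` imported earlier), a building block with `expansion = 1` keeps both the channel count and the spatial size of its input:
+
+```python
+# A 64-channel building block preserves the feature map shape.
+block = ResidualBlockBase(64, 64)
+x = ms.Tensor(np.ones([1, 64, 32, 32]), ms.float32)
+print(block(x).shape)  # (1, 64, 32, 32)
+```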
@@ -151,37 +243,39 @@ class ResidualBlockBase(nn.Cell):
 #### Bottleneck
 
-The following figure shows the bottleneck structure. With the same input, the bottleneck structure has fewer parameters than the building block structure. Therefore, the bottleneck structure is more suitable for a deep network. The residual structure used by ResNet-50 is bottleneck. The main body of this structure has three convolutional layers, namely, $1\times1$, $3\times3$, and $1\times1$. $1\times1$ is used for dimension reduction and dimension rollup.
+The following figure shows the bottleneck structure. With the same input, the bottleneck structure has fewer parameters than the building block structure. Therefore, the bottleneck structure is more suitable for a deep network. The residual structure used by ResNet-50 is bottleneck. The main branch of this structure has three convolutional layers: a $1\times1$ convolutional layer, a $3\times3$ convolutional layer, and another $1\times1$ convolutional layer, where the two $1\times1$ convolutional layers perform dimensionality reduction and dimensionality restoration, respectively.
 
 + On the first-layer network of the main body, 256 input channels are used. Dimension reduction is performed by using 64 convolution kernels with a size of $1\times1$. Then, 64 output channels are obtained through the Batch Normalization layer and the ReLU activation function layer.
 + On the second-layer network of the main body, features are extracted by using 64 convolution kernels with a size of $3\times3$. Then, 64 output channels are obtained through the Batch Normalization layer and the ReLU activation function layer.
 + On the third-layer network of the main body, dimension restoration is performed by using 256 convolution kernels with a size of $1\times1$. Then, 256 output channels are obtained through the Batch Normalization layer.
 
-Finally, the feature matrix output by the main body is added to the feature matrix output by the shortcut. After the ReLU activation function is used, the final output of the bottleneck is obtained.
+Finally, the feature matrix output by the main body is added to that output by the shortcut. After the ReLU activation function is used, the final output of the bottleneck is obtained.
 
 ![building-block-6](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/tutorials/application/source_zh_cn/cv/images/resnet_6.png)
 
 When adding the feature matrix output by the main body to that output by the shortcut, ensure that the shape of the feature matrix output by the main body is the same as that of the feature matrix output by the shortcut. If the shapes are different, for example, when the number of output channels is twice that of input channels, the number of convolution kernels used by the shortcut for convolution operations is the same as that of the output channels and the size is $1\times1$. If the size of the output image is half of that of the input image, `stride` in the convolution operation of the shortcut must be set to 2, and `stride` in the second-layer convolution operation of the main body must also be set to 2.
 
-The following code defines the `ResidualBlock` class to implement the bottleneck structure:
+The following code defines the `ResidualBlock` class to implement the bottleneck structure.
 
 ```python
 class ResidualBlock(nn.Cell):
     expansion = 4  # The number of convolution kernels at the last layer is four times that of convolution kernels at the first layer.
     def __init__(self, in_channel: int, out_channel: int,
-                 stride: int = 1, norm: Optional[nn.Cell] = None,
-                 down_sample: Optional[nn.Cell] = None) -> None:
+                 stride: int = 1, down_sample: Optional[nn.Cell] = None) -> None:
         super(ResidualBlock, self).__init__()
-        if not norm:
-            norm = nn.BatchNorm2d
-
-        self.conv1 = ConvNormActivation(in_channel, out_channel,
-                                        kernel_size=1, norm=norm)
-        self.conv2 = ConvNormActivation(out_channel, out_channel,
-                                        kernel_size=3, stride=stride, norm=norm)
-        self.conv3 = ConvNormActivation(out_channel, out_channel * self.expansion,
-                                        kernel_size=1, norm=norm, activation=None)
+
+        self.conv1 = nn.Conv2d(in_channel, out_channel,
+                               kernel_size=1, weight_init=weight_init)
+        self.norm1 = nn.BatchNorm2d(out_channel)
+        self.conv2 = nn.Conv2d(out_channel, out_channel,
+                               kernel_size=3, stride=stride,
+                               weight_init=weight_init)
+        self.norm2 = nn.BatchNorm2d(out_channel)
+        self.conv3 = nn.Conv2d(out_channel, out_channel * self.expansion,
+                               kernel_size=1, weight_init=weight_init)
+        self.norm3 = nn.BatchNorm2d(out_channel * self.expansion)
+
         self.relu = nn.ReLU()
         self.down_sample = down_sample
 
@@ -189,10 +283,15 @@ class ResidualBlock(nn.Cell):
     def construct(self, x):
         identity = x  # shortcut
 
         out = self.conv1(x)  # First layer of the main body: 1 x 1 convolutional layer
+        out = self.norm1(out)
+        out = self.relu(out)
         out = self.conv2(out)  # Second layer of the main body: 3 x 3 convolutional layer
+        out = self.norm2(out)
+        out = self.relu(out)
         out = self.conv3(out)  # Third layer of the main body: 1 x 1 convolutional layer
+        out = self.norm3(out)
 
-        if self.down_sample:
+        if self.down_sample is not None:
             identity = self.down_sample(x)
 
         out += identity  # The output is the sum of the main body and the shortcut.
         out = self.relu(out)
 
         return out
 ```
@@ -220,19 +319,25 @@ The following example defines `make_layer` to build residual blocks. The paramet
 ```python
 def make_layer(last_out_channel, block: Type[Union[ResidualBlockBase, ResidualBlock]],
                channel: int, block_nums: int, stride: int = 1):
     down_sample = None  # shortcut
     if stride != 1 or last_out_channel != channel * block.expansion:
-        down_sample = ConvNormActivation(last_out_channel, channel * block.expansion,
-                                         kernel_size=1, stride=stride, norm=nn.BatchNorm2d, activation=None)
+        down_sample = nn.SequentialCell([
+            nn.Conv2d(last_out_channel, channel * block.expansion,
+                      kernel_size=1, stride=stride, weight_init=weight_init),
+            nn.BatchNorm2d(channel * block.expansion, gamma_init=gamma_init)
+        ])
 
     layers = []
-    layers.append(block(last_out_channel, channel, stride=stride, down_sample=down_sample, norm=nn.BatchNorm2d))
+    layers.append(block(last_out_channel, channel, stride=stride, down_sample=down_sample))
 
     in_channel = channel * block.expansion
     # Stack residual networks.
     for _ in range(1, block_nums):
-        layers.append(block(in_channel, channel, norm=nn.BatchNorm2d))
+        layers.append(block(in_channel, channel))
 
     return nn.SequentialCell(layers)
 ```
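+
+Before assembling the full network, `make_layer` can be checked on its own (an addition to this tutorial, reusing `ms` and `np` from earlier): the first block of a stage maps `last_out_channel` input channels to `channel * block.expansion` output channels, and the remaining blocks keep that width.
+
+```python
+# A stage of 3 bottleneck blocks maps 64 -> 64 * 4 = 256 channels.
+layer1 = make_layer(64, ResidualBlock, 64, 3)
+x = ms.Tensor(np.ones([1, 64, 8, 8]), ms.float32)
+print(layer1(x).shape)  # (1, 256, 8, 8)
+```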
@@ -248,35 +353,41 @@ ResNet-50 has five convolution structures, one average pooling layer, and one fu
 The following sample code is used to build a ResNet-50 model. You can call the `resnet50` function to build a ResNet-50 model. The parameters of the `resnet50` function are as follows:
 
-+ `num_classes`: number of classes. The default value is **1000**.
-+ `pretrained`: Used to download the corresponding training model and load the parameters in the pre-trained model to the network.
++ `num_classes`: number of classes. The default value is 1000.
++ `pretrained`: whether to download the corresponding pre-trained model and load its parameters into the network.
 
 ```python
-from mindvision.classification.models.classifiers import BaseClassifier
-from mindvision.classification.models.head import DenseHead
-from mindvision.classification.models.neck import GlobalAvgPooling
-from mindvision.classification.utils.model_urls import model_urls
-from mindvision.utils.load_pretrained_model import LoadPretrainedModel
+from mindspore import load_checkpoint, load_param_into_net
 
 
 class ResNet(nn.Cell):
     def __init__(self, block: Type[Union[ResidualBlockBase, ResidualBlock]],
-                 layer_nums: List[int], norm: Optional[nn.Cell] = None) -> None:
+                 layer_nums: List[int], num_classes: int, input_channel: int) -> None:
         super(ResNet, self).__init__()
-        if not norm:
-            norm = nn.BatchNorm2d
+
+        self.relu = nn.ReLU()
 
         # At the first convolutional layer, the number of the input channels is 3 (color image) and that of the output channels is 64.
-        self.conv1 = ConvNormActivation(3, 64, kernel_size=7, stride=2, norm=norm)
+        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, weight_init=weight_init)
+        self.norm = nn.BatchNorm2d(64)
 
         # Maximum pooling layer, reducing the image size
         self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
 
-        # Definition of each residual network structure block
+        # Define each residual network structure block
         self.layer1 = make_layer(64, block, 64, layer_nums[0])
         self.layer2 = make_layer(64 * block.expansion, block, 128, layer_nums[1], stride=2)
         self.layer3 = make_layer(128 * block.expansion, block, 256, layer_nums[2], stride=2)
         self.layer4 = make_layer(256 * block.expansion, block, 512, layer_nums[3], stride=2)
+        # Average pooling layer
+        self.avg_pool = nn.AvgPool2d()
+        # Flatten layer
+        self.flatten = nn.Flatten()
+        # Fully-connected layer
+        self.fc = nn.Dense(in_channels=input_channel, out_channels=num_classes)
 
     def construct(self, x):
         x = self.conv1(x)
+        x = self.norm(x)
+        x = self.relu(x)
         x = self.max_pool(x)
 
         x = self.layer1(x)
@@ -284,79 +395,137 @@ class ResNet(nn.Cell):
         x = self.layer3(x)
         x = self.layer4(x)
 
-        return x
+        x = self.avg_pool(x)
+        x = self.flatten(x)
+        x = self.fc(x)
 
+        return x
+```
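+
+The assembled network can be given a quick smoke test (an addition to this tutorial, reusing `ms` and `np` from earlier). With a 32 x 32 input, the feature map entering the average pooling layer is 1 x 1, so the flattened feature vector has 512 * 4 = 2048 elements, which is why `input_channel` is 2048 below:
+
+```python
+# Smoke test of the ResNet-50 backbone on a CIFAR-10-sized input.
+net = ResNet(ResidualBlock, [3, 4, 6, 3], num_classes=10, input_channel=2048)
+x = ms.Tensor(np.ones([1, 3, 32, 32]), ms.float32)
+print(net(x).shape)  # (1, 10)
+```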
 
-def _resnet(arch: str, block: Type[Union[ResidualBlockBase, ResidualBlock]],
-            layers: List[int], num_classes: int, pretrained: bool, input_channel: int):
-    backbone = ResNet(block, layers)
-    neck = GlobalAvgPooling()  # Average pooling layer
-    head = DenseHead(input_channel=input_channel, num_classes=num_classes)  # Fully-connected layer
-    model = BaseClassifier(backbone, neck, head)  # Connect the backbone layer, neck layer, and head layer.
 
+```python
+def _resnet(model_url: str, block: Type[Union[ResidualBlockBase, ResidualBlock]],
+            layers: List[int], num_classes: int, pretrained: bool, pretrained_ckpt: str,
+            input_channel: int):
+    model = ResNet(block, layers, num_classes, input_channel)
 
     if pretrained:
-        # Download and load the pre-trained model.
-        LoadPretrainedModel(model, model_urls[arch]).run()
+        # Download and load the pre-trained model.
+        download(url=model_url, path=pretrained_ckpt)
+        param_dict = load_checkpoint(pretrained_ckpt)
+        load_param_into_net(model, param_dict)
 
     return model
 
 
 def resnet50(num_classes: int = 1000, pretrained: bool = False):
-    "ResNet-50 model"
-    return _resnet("resnet50", ResidualBlock, [3, 4, 6, 3], num_classes, pretrained, 2048)
+    "ResNet50 model"
+    resnet50_url = "https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/models/application/resnet50_224_new.ckpt"
+    resnet50_ckpt = "./LoadPretrainedModel/resnet50_224_new.ckpt"
+    return _resnet(resnet50_url, ResidualBlock, [3, 4, 6, 3], num_classes,
+                   pretrained, resnet50_ckpt, 2048)
 ```
 
 ## Model Training and Evaluation
 
-In this part, [a ResNet-50 pre-trained model](https://download.mindspore.cn/vision/classification/resnet50_224.ckpt) is used for fine-tuning. Call `resnet50` to build a ResNet-50 model and set `pretrained` to **True**. The ResNet-50 pre-trained model is automatically downloaded and the parameters of the pre-trained model are loaded to the network. Define an optimizer and a loss function, train the network by using the `model.train` API, and transfer the `mindvision.engine.callback.ValAccMonitor` API in MindSpore Vision to the callback function. The loss value and evaluation accuracy of the training are printed, and the CKPT file (**best.ckpt**) with the highest evaluation accuracy is saved to the current directory.
+In this part, [a ResNet-50 pre-trained model](https://download.mindspore.cn/vision/classification/resnet50_224.ckpt) is used for fine-tuning. Call `resnet50` to build a ResNet50 model and set `pretrained` to True so that the ResNet50 pre-trained model is automatically downloaded and its parameters are loaded into the network. Then define the optimizer and loss function, print the loss value and evaluation accuracy of each training epoch, and save the ckpt file with the highest evaluation accuracy (resnet50-best.ckpt) to ./BestCheckpoint under the current path.
 
 ```python
-from mindspore.train import Model, Accuracy
-from mindvision.engine.callback import ValAccMonitor
-
-# Define the ResNet-50 network.
+import mindspore as ms
+# Define the ResNet50 network.
 network = resnet50(pretrained=True)
 
 # Size of the input layer of the fully-connected layer
-in_channel = network.head.dense.in_channels
-head = DenseHead(input_channel=in_channel, num_classes=10)
+in_channel = network.fc.in_channels
+fc = nn.Dense(in_channels=in_channel, out_channels=10)
 # Reset the fully-connected layer.
-network.head = head
-# Set the learning rate.
+network.fc = fc
+
+for param in network.get_parameters():
+    param.requires_grad = True
+```
+
+```text
+Replace is False and data exists, so doing nothing. Use replace=True to re-download the data.
+```
+
+```python
+# Set the learning rate
 num_epochs = 40
-lr = nn.cosine_decay_lr(min_lr=0.00001, max_lr=0.001, total_step=step_size * num_epochs,
-                        step_per_epoch=step_size, decay_epoch=num_epochs)
-# Define an optimizer and a loss function.
+lr = nn.cosine_decay_lr(min_lr=0.00001, max_lr=0.001, total_step=step_size_train * num_epochs,
+                        step_per_epoch=step_size_train, decay_epoch=num_epochs)
+# Define the optimizer and loss function
 opt = nn.Momentum(params=network.trainable_params(), learning_rate=lr, momentum=0.9)
-loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-# Instantiate the model.
-model = Model(network, loss, opt, metrics={"Accuracy": Accuracy()})
-# Perform model training.
-model.train(num_epochs, ds_train, callbacks=[ValAccMonitor(model, ds_val, num_epochs)])
+loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+
+
+def forward_fn(inputs, targets):
+    logits = network(inputs)
+    loss = loss_fn(logits, targets)
+    return loss
+
+grad_fn = ops.value_and_grad(forward_fn, None, opt.parameters)
+
+def train_step(inputs, targets):
+    loss, grads = grad_fn(inputs, targets)
+    opt(grads)
+    return loss
+
+# Instantiate the model
+model = ms.Model(network, loss_fn, opt, metrics={"Accuracy": nn.Accuracy()})
 ```
 
-```Text
---------------------
-Epoch: [ 0 / 40], Train Loss: [2.733], Accuracy: 0.274
---------------------
-Epoch: [ 1 / 40], Train Loss: [2.877], Accuracy: 0.319
---------------------
-Epoch: [ 2 / 40], Train Loss: [2.438], Accuracy: 0.249
---------------------
-Epoch: [ 3 / 40], Train Loss: [1.532], Accuracy: 0.386
-
-······
-
-Epoch: [ 37 / 40], Train Loss: [1.142], Accuracy: 0.738
---------------------
-Epoch: [ 38 / 40], Train Loss: [0.402], Accuracy: 0.727
---------------------
-Epoch: [ 39 / 40], Train Loss: [2.031], Accuracy: 0.735
---------------------
-Epoch: [ 40 / 40], Train Loss: [0.582], Accuracy: 0.745
-================================================================================
-End of validation the best Accuracy is: 0.754, save the best ckpt file in ./best.ckpt
-```
-
+```python
+# Create iterators
+data_loader_train = dataset_train.create_tuple_iterator(num_epochs=num_epochs)
+data_loader_val = dataset_val.create_tuple_iterator(num_epochs=num_epochs)
+
+# Path for storing the best model
+best_acc = 0
+best_ckpt_dir = "./BestCheckpoint"
+best_ckpt_path = "./BestCheckpoint/resnet50-best.ckpt"
+```
+
+```python
+import os
+
+# Start the training loop
+print("Start Training Loop ...")
+for epoch in range(num_epochs):
+    losses = []
+    network.set_train()
+
+    # Read in the data of each training epoch
+    for i, (images, labels) in enumerate(data_loader_train):
+        loss = train_step(images, labels)
+        if i % 100 == 0 or i == step_size_train - 1:
+            print('Epoch: [%3d/%3d], Steps: [%3d/%3d], Train Loss: [%5.3f]' % (
+                epoch + 1, num_epochs, i + 1, step_size_train, loss))
+        losses.append(loss)
+
+    # Validate the accuracy after each epoch
+    acc = model.eval(dataset_val)['Accuracy']
+
+    print("-" * 50)
+    print("Epoch: [%3d/%3d], Average Train Loss: [%5.3f], Accuracy: [%5.3f]" % (
+        epoch + 1, num_epochs, sum(losses) / len(losses), acc
+    ))
+    print("-" * 50)
+
+    if acc > best_acc:
+        best_acc = acc
+        if not os.path.exists(best_ckpt_dir):
+            os.mkdir(best_ckpt_dir)
+        if os.path.exists(best_ckpt_path):
+            os.remove(best_ckpt_path)
+        ms.save_checkpoint(network, best_ckpt_path)
+
+print("=" * 80)
+print(f"End of validation the best Accuracy is: {best_acc: 5.3f}, "
+      f"save the best ckpt file in {best_ckpt_path}", flush=True)
 ```
 
 ## Visualizing Model Prediction Results
 
@@ -365,33 +534,40 @@ Define the `visualize_model` function, use the model with the highest validation
 ```python
 import matplotlib.pyplot as plt
-import mindspore as ms
-from mindspore.train import Model
 
-def visualize_model(best_ckpt_path, val_ds):
+def visualize_model(best_ckpt_path, dataset_val):
     num_class = 10  # There are 10 classes in the CIFAR-10 dataset.
     net = resnet50(num_class)
     # Load model parameters.
     param_dict = ms.load_checkpoint(best_ckpt_path)
     ms.load_param_into_net(net, param_dict)
-    model = Model(net)
+    model = ms.Model(net)
     # Load the validation dataset.
- data = next(val_ds.create_dict_iterator()) + data = next(dataset_val.create_dict_iterator()) images = data["image"].asnumpy() labels = data["label"].asnumpy() # Predict the image type. output = model.predict(ms.Tensor(data['image'])) pred = np.argmax(output.asnumpy(), axis=1) + # Image classification + classes = [] + + with open(data_dir+"/batches.meta.txt", "r") as f: + for line in f: + line = line.rstrip() + if line != '': + classes.append(line) + # Display the image and the predicted value of the image. plt.figure() - for i in range(1, 7): - plt.subplot(2, 3, i) + for i in range(6): + plt.subplot(2, 3, i+1) # If the prediction is correct, the color is blue. If the prediction is incorrect, the color is red. - color = 'blue' if pred[i - 1] == labels[i - 1] else 'red' - plt.title('predict:{}'.format(dataset_val.index2label[pred[i - 1]]), color=color) - picture_show = np.transpose(images[i - 1], (1, 2, 0)) + color = 'blue' if pred[i] == labels[i] else 'red' + plt.title('predict:{}'.format(classes[pred[i]]), color=color) + picture_show = np.transpose(images[i], (1, 2, 0)) mean = np.array([0.4914, 0.4822, 0.4465]) std = np.array([0.2023, 0.1994, 0.2010]) picture_show = std * picture_show + mean @@ -402,7 +578,7 @@ def visualize_model(best_ckpt_path, val_ds): plt.show() # Use the test dataset for validation. -visualize_model('best.ckpt', ds_val) +visualize_model(best_ckpt_path=best_ckpt_path, dataset_val=dataset_val) ``` ![png](images/output_161_0.png) diff --git a/tutorials/application/source_zh_cn/cv/resnet50.ipynb b/tutorials/application/source_zh_cn/cv/resnet50.ipynb index 7774028aed..298513b061 100644 --- a/tutorials/application/source_zh_cn/cv/resnet50.ipynb +++ b/tutorials/application/source_zh_cn/cv/resnet50.ipynb @@ -31,7 +31,7 @@ "source": [ "## 数据集准备与加载\n", "\n", - "[CIFAR-10数据集](http://www.cs.toronto.edu/~kriz/cifar.html)共有60000张32*32的彩色图像,分为10个类别,每类有6000张图,数据集一共有50000张训练图片和10000张评估图片。首先,如下示例使用`download`接口下载并解压,目前仅支持解析二进制版本的CIFAR-10文件(CIFAR-10 binary version)" + "[CIFAR-10数据集](http://www.cs.toronto.edu/~kriz/cifar.html)共有60000张32*32的彩色图像,分为10个类别,每类有6000张图,数据集一共有50000张训练图片和10000张评估图片。首先,如下示例使用`download`接口下载并解压,目前仅支持解析二进制版本的CIFAR-10文件(CIFAR-10 binary version)。" ] }, { -- Gitee