From 2d63db80d9e5b958a5ff2377f9d513bda4b5dae1 Mon Sep 17 00:00:00 2001
From: pengxiaopeng <958876660@qq.com>
Date: Tue, 4 Jun 2024 10:54:27 +0800
Subject: [PATCH] New features for the training-status monitoring tool
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 debug/accuracy_tools/kj600/README.md          |  52 ++-
 debug/accuracy_tools/kj600/img/cpu_info.png   | Bin 0 -> 13826 bytes
 debug/accuracy_tools/kj600/img/train.png      | Bin 0 -> 8366 bytes
 .../kj600/img/train_with_kj600.png            | Bin 0 -> 8350 bytes
 .../kj600/kj600/anomaly_detect.py             |  86 +++++
 .../kj600/kj600/anomaly_inform.py             |  75 ++++
 .../kj600/distributed/distributed_ops.yaml    |  14 +
 .../kj600/distributed/wrap_distributed.py     | 156 +++++++++
 debug/accuracy_tools/kj600/kj600/features.py  |  20 ++
 .../accuracy_tools/kj600/kj600/module_hook.py | 321 ++++++++++++------
 .../kj600/kj600/module_metric.py              | 125 +++++++
 .../kj600/kj600/optimizer_collect.py          |  74 ++--
 .../kj600/kj600/unittest/config_1.json        |   8 -
 .../kj600/kj600/unittest/test_features.py     |  33 --
 .../kj600/kj600/unittest/test_module_hook.py  |  78 -----
 debug/accuracy_tools/kj600/kj600/utils.py     |  47 +++
 ...47\350\203\275\345\237\272\347\272\277.md" |  52 +++
 17 files changed, 890 insertions(+), 251 deletions(-)
 create mode 100644 debug/accuracy_tools/kj600/img/cpu_info.png
 create mode 100644 debug/accuracy_tools/kj600/img/train.png
 create mode 100644 debug/accuracy_tools/kj600/img/train_with_kj600.png
 create mode 100644 debug/accuracy_tools/kj600/kj600/anomaly_detect.py
 create mode 100644 debug/accuracy_tools/kj600/kj600/anomaly_inform.py
 create mode 100644 debug/accuracy_tools/kj600/kj600/distributed/distributed_ops.yaml
 create mode 100644 debug/accuracy_tools/kj600/kj600/distributed/wrap_distributed.py
 create mode 100644 debug/accuracy_tools/kj600/kj600/module_metric.py
 delete mode 100644 debug/accuracy_tools/kj600/kj600/unittest/config_1.json
 delete mode 100644 debug/accuracy_tools/kj600/kj600/unittest/test_features.py
 delete mode 100644 debug/accuracy_tools/kj600/kj600/unittest/test_module_hook.py
 create mode 100644 debug/accuracy_tools/kj600/kj600/utils.py
 create mode 100644 "debug/accuracy_tools/kj600/\350\256\255\347\273\203\347\212\266\346\200\201\347\233\221\346\216\247\345\267\245\345\205\267\346\200\247\350\203\275\345\237\272\347\272\277.md"

diff --git a/debug/accuracy_tools/kj600/README.md b/debug/accuracy_tools/kj600/README.md
index 05fcb4a215..b39db6a007 100644
--- a/debug/accuracy_tools/kj600/README.md
+++ b/debug/accuracy_tools/kj600/README.md
@@ -1,8 +1,8 @@
-# kj600 Model Training State Monitoring Tool
+# TensorProbe (codename: kj600) Model Training State Monitoring Tool

 ## Introduction

-This project develops a model training state monitoring tool named kj600, which collects and aggregates the intermediate states of layers and the optimizer during training to help diagnose anomalies that occur while a model trains.
+This project develops a model training state monitoring tool that collects and aggregates the intermediate values of network layers, the optimizer, and communication operators during training, helping diagnose anomalies in the compute, communication, and optimizer parts of a training run.

 ## Installation

@@ -41,16 +41,27 @@ pip install -e .
"targets": { "language_model.encoder.layers.0": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"} }, - "module_ranks": "1,2,3,4", - "ur_distribution": true + "print_struct": false, + "module_ranks": [1,2,3,4], + "ur_distribution": true, + "xy_distribution": true, + "mv_distribution": true, + "wg_distribution": true, + "mg_direction": true, + "cc_distribution": {"enable":true, "cc_codeline":[]}, + "alert": { + "rules": [{"rule_name": "AnomalyTurbulence", "args": {"threshold": 0.5}}] + }, + "ops": ["min", "max", "norm", "zeros", "id"], + "eps": 1e-8 } ``` -每个要监控的module有特定的输入输出格式(依赖于模型实现),所以我们需要指定前向输入输出格式和反向计算时输入张量的梯度和输出张量的梯度格式。 如果不清楚的话可以先猜测, 格式规范与实际输入不同时会报详细错误。 我们也会随时更新更多常用module的格式规范。 +每个要监控的module有特定的输入输出格式(依赖于模型实现),所以我们需要指定前向输入输出格式和反向计算时输入张量的梯度和输出张量的梯度格式。 如果不清楚的话可以填空字段("targets":{}),然后将 "print_struct" 字段设置为 true, 之后工具会打印详细的模型结构。 我们也会随时更新更多常用module的格式规范。 下面详细解释各个字段: -"targets":必选字段,指定需要监控的大模型层, 例如transformer的第0层language_model.encoder.layers.0。如果不清楚层命名, 可以使用空的json配置文件, 之后监控工具会打印模型中torch module的名字, 你可以从中选择你关心的module。 +"targets":必选字段,指定需要监控的大模型层, 例如transformer的第0层language_model.encoder.layers.0。如果不清楚模型结构, 可以填空字段("targets":{}),然后将 "print_struct" 字段设置为 true, 之后监控工具会打印模型中torch module的名字和详细结构,并在第1个step后退出, 你可以从中选择你关心的module。 "input":可选字段,"tuple[2]:0"的意思是目标module的前向input参数为长度为2的tuple, 我们关心的是tuple第0个元素。 @@ -62,9 +73,25 @@ pip install -e . "module_ranks":可选字段,用于在分布式训练场景中希望控制在哪些rank开启module监控。如果不填,则默认在所有rank开启。 -"ur_distribution": 可选字段,若为true则会统计adam优化器的update和ratio的数值分布,并展示在heatmap里,默认为false。 +"print_struct":可选字段,设置为true后监控工具会打印模型中torch module的名字和详细结构,并在第1个step后退出。不填默认为false。 + +"ur_distribution": 可选字段,若为true则会统计adam优化器指定模块(targets中指定)参数的update和ratio向量的数值分布,并展示在heatmap里,默认为false。 + +"xy_distribution": 可选字段, 若为true则会监控指定module(targets中指定)的输入输出张量。 默认为false。 + +"mv_distribution": 可选字段, 若为true则会监控指定模块中的参数的优化器状态, 默认为false。 + +"wg_distribution": 可选字段, 若为true则会监控指定模块的参数梯度, 默认为false。 + +"alert": 必选字段。 指定自动报警的异常检测机制及其相应的阈值。目前实现的异常检测是AnomalyTurbulence。 如果统计标量超出历史均值的指定浮动范围(threshold指定, 0.5意味着上浮或者下浮50%)。 目前报警是在控制台打印, 未来会实现发邮件和写数据库。 + +"mg_direction": 可选字段,若为true则会统计adam优化器的一阶矩($m_{t-1}$)和当前梯度($g_t$)符号一致的参数比例。 + +"cc_distribution": 可选字段, 其中“enable”字段控制开关;“code_line”字段指定监控的代码行,默认为空列表,不特别指定。!!开启后, 会在监控过程让异步通信同步。 + +"ops": 可选字段,与ur_distribution、xy_distribution、mv_distribution、wg_distribution、mg_direction、cc_distribution配合,监控所选张量的min、max、norm、zeros值。其中,zeros代表监控所选张量的元素小于eps的比例,id代表监控所选的非张量本身,默认为[]。 -"mg_direction": 可选字段,若为true则会统计adam优化器的动量与当前梯度方向一致的参数比例。 +"eps": 可选字段,若ops里包含"zeros"则需要配置,默认为1e-8。 下面给出transformer架构模型中常见的module的前向计算的输入输出和反向计算输入张量的梯度和输出张量的梯度格式,以供参考: @@ -98,11 +125,14 @@ pip install -e . 

 Below, for reference, are the forward input/output formats and the backward input-gradient/output-gradient formats of common modules in transformer-architecture models:

@@ -98,11 +125,14 @@ pip install -e .

 ```
 from kj600.module_hook import TrainerMon

-hooker = TrainerMon("./llama2_config.json")
-hooker.hook_modules(model=model, global_batch_size=args.global_batch_size, dp=args.data_parallel_size, micro_batch_size=args.micro_batch_size, fwd_or_bkd=0)
+hooker = TrainerMon("./llama2_config.json", params_have_main_grad=True, opt_ty="Megatron_DistributedOptimizer")  # or opt_ty="Megatron_Float16OptimizerWithFloat16Params"
+hooker.hook_modules(model=model, grad_acc_steps=args.global_batch_size//args.data_parallel_size//args.micro_batch_size)
 ```
+params_have_main_grad: if True, parameter weight gradients are read from main_grad rather than grad. Defaults to True.
+
+If you are not training with the Megatron-LM framework, you can pass your own gradient-accumulation step count as grad_acc_steps.

-If you want to monitor the momentum and variance of a mixed-precision optimizer, add the following code after the mixed-precision optimizer has been constructed:
+If you want to monitor the momentum and variance of a mixed-precision optimizer, add the following code after the mixed-precision optimizer has been constructed. Only two optimizer types are supported so far: Megatron_DistributedOptimizer (bf16 or fp16 mixed precision with the distributed optimizer enabled) and Megatron_Float16OptimizerWithFloat16Params (bf16 or fp16 mixed precision without the distributed optimizer).

 ```
 model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
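
For reference, a sketch of the full registration (assuming Megatron-LM's setup_model_and_optimizer signature; model_provider and model_type are placeholder arguments from that framework, and TrainerMon.set_wrapped_optimizer is the hook defined in module_hook.py below):

```
model, optimizer, opt_param_scheduler = setup_model_and_optimizer(
    model_provider, model_type)
# hand the constructed mixed-precision optimizer to the monitoring tool
TrainerMon.set_wrapped_optimizer(optimizer)
```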
diff --git a/debug/accuracy_tools/kj600/img/cpu_info.png b/debug/accuracy_tools/kj600/img/cpu_info.png
new file mode 100644
index 0000000000000000000000000000000000000000..c69eb61b11be5901428fd20b3d5f69909efffafb
GIT binary patch
literal 13826
[base85-encoded binary image data omitted; the patch also adds the binary images img/train.png (8366 bytes) and img/train_with_kj600.png (8350 bytes)]
literal 0
HcmV?d00001
zSp~E^U)=N;g=o^$$2!vNi7z=Khx8J3XqP+}MBd>Cr}l2ov_QmWb(xH0QWi0XP7hlP zb(P*e`_H}VoAD3;e+*l@Ue;P?^oOS2ZN8I&|75GL_<0!R4}yOfGv6m3?KdOk1m&@$ zs0MSc>~cSZe7n6z;H0V2^sp2vEGN9lg~*zSE=b(=4lP-IwEPcoPK*lrCGn1< z)*?p8qs%iU;&u}e$pohw0!b>qpo6pLgSjw&GZt}3$WiNTulvCD+gInLk8Q*2?{#l77@>?T+Hd%26eTW$N@CRoP1&5wEO7bh4nGzLhr8ZnKx&vuRaz3^Fsj#}kvLk{$lw~8Ns_7d6 ziM{LI%f;NuGtyeJl~ed-iF7qUx`01K3cz{JZEKY_J3b0tUx5kwndkB3sfHZicv}J0 zB#c=yLg$17lCNX(pA#IHG$GMU!H3;zzX}a7YJX7^?sPwdD4_?h-U|-MV4LA!-~ArnXJaA;!SF{; z^^_urZuN@)>Mlh=(s0(YL#Bh0$FJIaTDg?c+aJ}V9`u!>9H3cRVA`k?-i^Ai z(?>2XjKnV#mY6i+zZ2NM7CE<6K)*|6s29s! z)4a>$OnVhJCL5ytYEo-eNbO#l>0!e7hoMz(BrL}rWY#g) z>`>>~oM_(Anw28r@olS%)z>ti4CK>CrLxw&Tz3bL$H-^R?C6H}W*7-$&kGa7_nRD!PMo%=UQqQRjxxBZgE6}rL2J?+}x(IuHXsX2GE$b7j*)u zr9gO79NXAW`u)$@5%N++sm)i+C{DI#%^i*>lz5CxAwQot@^?q#e(b&IVapsJtG%Piq^sv}sxE-0 zX&xP@CSpncHV@nLxQCA$1JeU)q$)dq5#p7&_*rOiGd5t!_9v1k*w*h$DEM-3=~cMg zNMJNs&ps%RJbmIz;g(C4Mg7=#?Tp>@iBxcjlwhyU*mf>-M|b3Z*JEj4M0UoTm`sH5 z=IW`S>@xEJO+-O*xeBNc{E2V0eBZ$bW(Ivs7}4`1F=E6D?trijM{BQ6e=d^(hbQeV@Qaj!B0h>27K@3^2lkoSfsl%wf3>d{A2Q%E|&Uv&!!&(a&NH7j!FQO|c{yRRz_m5Lc-<`!N&8fOInb z22EZ2j04PiTB_=S`@kqBH+TctoB8BiO_&BGaS<#6U)9wV5FbNqvpiwPW@PfrgnHvA zUJR7xJz?WG{+3M^E>NlEtd4fdqePfVk|`x+7n8l|up=nqibe}%V`I0cnbXs&;Ajna=Mv7vA5SfgA?3Lh30`Uc z8gg#=9(`F?nI>j$T0-WGAsy7Yd%XKe9Kkn|i!XOVu>5u)?_L|EQt?979{MEYfC13x zM16~|PBXUv5Na#lU~0 zw<0ZnE3!Yt_f*X+>HVEwer<-Yf7&#TeGaexqGN@OU*;e3vmqlCf`|Yg$vI4py*;X& z7Bc7X&3=v5CNU~exN+^%q%EG;>^tenbdLhn~-e6=ed3LDd5Q;6lk7Knf zn=W4MpV+r*`lgvISJs9<#0-k!AKYwt2+j#4AT?Yx2=(I`PLQqH?XlaQv9hG>W|+|C z>-lcd4aiRzAw>kuy~UHj?2lqA;dPt{1&%1pPc$RRUfXz{qP`6s{`?6(2K(jaC9%tI72JDkNJ@fCOd9NC(i?SE7c55p!gEQVkx2fgDzXtl^0b_0dq#jTKcWH@K;qsC z)Zx62$;>9T`A;?vsd=^fPU_f7EFsE^Klh~}63(i|NU?u~&LL7|jsn5s)qF*BMVjH? zqa2Kcz|J?{ezU9kg^8t#VOP5{irPJQf1$!wm;rU*dx3tKw6r&-w1Ke%cD&QODl|K`d$A?OCb6TZmBz+!UkYbYCM}9e@s4P&nw&?v9*1IIXe7qEIDbxyHDMwZTUN zm$FA4O?}Yhq;bB4x=|e`|3j!m{og}n zXV|J3di2g@eUC8G#dgIhTzs*AWd75N+F;1HTu*4T@1sLzMiqx`1c=mfR|K;OzEP^N z>Hnl^EW!VL7rw3KFf%%%SYV69E)5sN@F%+n!m2x%WK+(-wto+}v^lzAL}T%;uxLa) zv1~QQVa7mD`5#z+d@id17=zO*L^1LmK&QvjuO?(A3vTI;2Mud{y@-pb!( zWnR{HumYJz-Xz&Irn%6NK^GI-hplr&&Z*F0G>0&cuMmr**mPz<&5evFCanrzwHu_l zEHg`jORw;gdJGd_N!G-lJIxoL zGo}01X;s*;f{XN}NIor?v;GvmmX9z1AN2)ci#`UV(WP=U-YXS5$p&(3tcs+-JqEj@ z{RCJYHB$6Kc+VQv#cO&hS1QrPxfI&cOv^L$^mtyv^EC?GIez0ZM_zM(E}(9IZhR)b zeg;57t{u4%^8Y|fSI_PY0^d?0$ko`wp>xjtsajB1kdLIc=lA`FSI99Q>b<>k-Ri7O zwlK&3@MVRaj(l`AKc6|*VIyAK3+5d=rR$M>Q4qEJ*{55P z#`E>(y4c%r`R|>^HkpBP?wm@>$-HA-*Q$zQQ|Cor+1mPZEzgkFwmlHjEn7AtAN1CjhPNGbDPNZ~=02l`5Bc<`zbc<8 zU;R3iCNj){4jb-YZag?DA=mM?VUofd5c{(NExAu;X4(Wj6I*1?2tJ~@+0r3sdT-;g z%a`8w%QUQB#2w2zhta`|vV#yiGSn3*w4LDCxB3#eEw4?fe=JMt1fmnq2}c!H@*)cyqh|4*088T0y>#a{)E z3$U861nuy;rEvVM7?S_+A-8GI z#7xv{diXsV%zJyCMbwjH4G!sK$}U)$C@aS>ohXQa4(TN&cL@=0ZF72yX8M!W6mxdH zkcAS7L{O4c{*G7&a~_b~7EM;ti*$UdQ7GT~88jXg^iCbTu*p)q`_zTw^2G-Oa>yTY zq0CFB3luSCe?k&#L=V4#IEo5#i<_+{9he?aof&ba-j51B>hWg88)pL?QCZ`svihI7 zNbc3BGH&CikcJHH)k>*`peDY28rlQ^69(vHA34K;X6p;L%g@mVyubHG>&h)6KeK}d zj-5%H6~F|12T$jY1E%dC;MOdcV?c+Dsv?Yb59&f%G)lgfNBgOFvdegK?gU}QSQmaV z6Xjw{{hvgM*WLfSOx!i2wY`U{HA;H0^Ix1*lZ zq-3$EwWkpd60Z$FS{JT`T{fAOndFirbKO>pK z{rq>T)B@+}I;bvyMWYd7XqyK5F!k(~p^?H9ysEw)sc}}cZ;55#j5o?flV6W_#LL;j zJ8?=UMfKHGV;vHjpO$=d2M~8*1ob|3Tbxxr5hA;9$6fGah5}r5=HKcfCJG`D5u(>+ znNE^m-v54=OB1 OF*ULVE<2`y`kE literal 0 HcmV?d00001 diff --git a/debug/accuracy_tools/kj600/kj600/anomaly_detect.py b/debug/accuracy_tools/kj600/kj600/anomaly_detect.py new file mode 100644 index 0000000000..0ce34fe22a --- /dev/null +++ b/debug/accuracy_tools/kj600/kj600/anomaly_detect.py @@ -0,0 +1,86 @@ +import statistics as 
+import sys
+import statistics as st
+from abc import ABC
+from collections import defaultdict
+from typing import List
+
+from torch.utils.tensorboard import SummaryWriter
+
+class ScanRule(ABC):
+    def apply(self, history, cur):
+        raise NotImplementedError("abstract method apply is not implemented")
+
+class AnomalyTurbulence(ScanRule):
+    name = "AnomalyTurbulence"
+    def __init__(self, threshold) -> None:
+        self.threshold = threshold
+    def apply(self, history, cur):
+        baseline = st.mean(history) if isinstance(history, list) else history
+        up_bound = baseline + baseline * self.threshold
+        if baseline > 0:
+            return cur > up_bound
+        else:
+            return cur < up_bound
+
+class AnomalyScanner:
+
+    @staticmethod
+    def load_rules(specs: List[dict]):
+        if specs is None:
+            return []
+        alert_rules = []
+        for spec in specs:
+            rule_cls_name = spec["rule_name"]
+            rule_args = spec["args"]
+            cur_module = sys.modules[__name__]
+            rule_cls = getattr(cur_module, rule_cls_name)
+            rule_instance = rule_cls(**rule_args)
+            alert_rules.append(rule_instance)
+        return alert_rules
+
+    @staticmethod
+    def scan(scan_rules: List[ScanRule], history, cur):
+        anomaly = False
+        for rule in scan_rules:
+            anomaly = rule.apply(history, cur)
+            if anomaly:
+                return anomaly, rule.name
+        return anomaly, None
+
+class bcolors:
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKCYAN = '\033[96m'
+    OKGREEN = '\033[92m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+
+class SummaryWriterWithAD(SummaryWriter):
+    def __init__(self, path, ad_rules, anomaly_inform=None):
+        super().__init__(path)
+        self.tag2scalars = defaultdict(list)
+        self.ad_rules = ad_rules
+        self.anomaly_inform = anomaly_inform
+
+    def _ad(self, scalar_value, history):
+        return AnomalyScanner.scan(self.ad_rules, history, cur=scalar_value)
+
+    def add_scalar(self, tag, scalar_value, global_step=None, walltime=None, new_style=False, double_precision=False):
+        new_avg = avg = scalar_value
+        if tag in self.tag2scalars:
+            N = len(self.tag2scalars[tag])
+            _, avg = self.tag2scalars[tag][-1]
+            new_avg = (avg*N + scalar_value)/(N + 1)
+        self.tag2scalars[tag].append((scalar_value, new_avg))
+        detected, rule_name = self._ad(scalar_value, history=avg)
+        if detected:
+            exception_message = f"{bcolors.WARNING}> Rule {rule_name} reports anomaly signal in {tag} at step {global_step}.{bcolors.ENDC}"
+            print(exception_message)
+            if self.anomaly_inform:
+                self.anomaly_inform.run(exception_message)
+        return super().add_scalar(tag, scalar_value, global_step, walltime, new_style, double_precision)
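
A minimal sketch of how these pieces fit together (paths and values are illustrative): AnomalyTurbulence with threshold 0.5 flags a scalar that moves more than 50% away from its running average, and SummaryWriterWithAD raises the alert inside add_scalar:

```
from kj600.anomaly_detect import AnomalyScanner, SummaryWriterWithAD

rules = AnomalyScanner.load_rules(
    [{"rule_name": "AnomalyTurbulence", "args": {"threshold": 0.5}}])
writer = SummaryWriterWithAD("./kj600_output/demo", rules)
writer.add_scalar("layers.0/input/norm", 1.0, global_step=0)  # establishes the baseline
writer.add_scalar("layers.0/input/norm", 2.0, global_step=1)  # 2.0 > 1.5, a warning is printed
```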
diff --git a/debug/accuracy_tools/kj600/kj600/anomaly_inform.py b/debug/accuracy_tools/kj600/kj600/anomaly_inform.py
new file mode 100644
index 0000000000..0bdafdaf82
--- /dev/null
+++ b/debug/accuracy_tools/kj600/kj600/anomaly_inform.py
@@ -0,0 +1,75 @@
+import smtplib
+from email.mime.text import MIMEText
+import sqlite3
+from datetime import datetime, timedelta
+
+# factory that picks the informer subclass from the "recipient" setting
+class AnomalyInformFactory:
+    @staticmethod
+    def create_informer(**kwargs):
+        if kwargs['recipient'] == "database":
+            return DatabaseInform(**kwargs)
+        elif kwargs['recipient'] == "email":
+            return EmailInform(**kwargs)
+        else:
+            raise ValueError("Invalid recipient specified")
+
+# base class for informers that report anomalies to a database or by email
+class AnomalyInform:
+    def __init__(self, **kwargs):
+        self.inform_args = kwargs
+        self.exception_message_list = []
+        self.time = 0
+        self.current_time = 0
+
+    def inform_fun(self, exception_message_list):
+        pass
+
+    def run(self, exception_message):
+        # batch messages and flush them at most once per interval_time minutes
+        if self.time != 0 and self.current_time == 0:
+            self.current_time = datetime.now()
+        if self.time == 0 or ((self.current_time - self.time) > timedelta(minutes=self.interval_time)):
+            self.exception_message_list.append(exception_message)
+            self.inform_fun(self.exception_message_list)
+            self.exception_message_list = []
+            self.time = datetime.now()
+        elif (self.current_time - self.time) <= timedelta(minutes=self.interval_time):
+            self.exception_message_list.append(exception_message)
+            self.current_time = datetime.now()
+
+class DatabaseInform(AnomalyInform):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.interval_time = 2
+
+    def inform_fun(self, exception_message_list):
+        with sqlite3.connect(self.inform_args['connection_str']) as conn:
+            cursor = conn.cursor()
+            cursor.execute('''CREATE TABLE IF NOT EXISTS exceptions(
+                        id INTEGER PRIMARY KEY,
+                        message TEXT
+                        )''')
+            now_time = datetime.now()
+            for exception_message in exception_message_list:
+                exception_message = f"Current time is: {now_time}" + exception_message
+                cursor.execute("INSERT INTO exceptions (message) VALUES (?)", (exception_message,))
+
+class EmailInform(AnomalyInform):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.interval_time = 10
+
+    def inform_fun(self, exception_message_list):
+        subject = "Exception Detected in Your Program"
+        text = f"{len(exception_message_list)} exception(s) were detected in your program:\n\n"
+        for exception_message in exception_message_list:
+            text += exception_message + '\n'
+        message = MIMEText(text, "plain")
+        message["Subject"] = subject
+        message["From"] = self.inform_args['email']
+        message["To"] = self.inform_args['email']
+
+        with smtplib.SMTP(self.inform_args['smtp_server_name'], self.inform_args.get('smtp_number', 587)) as server:
+            server.starttls()
+            server.login(self.inform_args['id'], self.inform_args['password'])
+            server.sendmail(self.inform_args['email'], self.inform_args['email'], message.as_string())
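
A usage sketch (all connection settings below are hypothetical placeholders; the keyword names mirror the inform_args lookups in the classes above):

```
from kj600.anomaly_inform import AnomalyInformFactory

# hypothetical SMTP account; messages are batched per interval_time
informer = AnomalyInformFactory.create_informer(
    recipient="email",
    email="user@example.com",
    smtp_server_name="smtp.example.com",
    id="user", password="...",
)
informer.run("> Rule AnomalyTurbulence reports anomaly signal in layers.0/input/norm at step 7.")
```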
diff --git a/debug/accuracy_tools/kj600/kj600/distributed/distributed_ops.yaml b/debug/accuracy_tools/kj600/kj600/distributed/distributed_ops.yaml
new file mode 100644
index 0000000000..51c803eb0b
--- /dev/null
+++ b/debug/accuracy_tools/kj600/kj600/distributed/distributed_ops.yaml
@@ -0,0 +1,14 @@
+distributed:
+  - send
+  - recv
+  - broadcast
+  - all_reduce
+  - reduce
+  - all_gather
+  - gather
+  - isend
+  - irecv
+  - scatter
+  - reduce_scatter
+  - _reduce_scatter_base
+  - _all_gather_base
\ No newline at end of file
diff --git a/debug/accuracy_tools/kj600/kj600/distributed/wrap_distributed.py b/debug/accuracy_tools/kj600/kj600/distributed/wrap_distributed.py
new file mode 100644
index 0000000000..ba559baa44
--- /dev/null
+++ b/debug/accuracy_tools/kj600/kj600/distributed/wrap_distributed.py
@@ -0,0 +1,156 @@
+import os
+import re
+import inspect
+import yaml
+import torch
+import torch.nn as nn
+import torch.distributed as dist
+
+from ..module_metric import get_metrics
+
+try:
+    import torch_npu
+except ImportError:
+    is_gpu = True
+else:
+    is_gpu = False
+
+
+cur_path = os.path.dirname(os.path.realpath(__file__))
+yaml_path = os.path.join(cur_path, "distributed_ops.yaml")
+with open(yaml_path) as f:
+    WrapDistributedOps = yaml.safe_load(f).get('distributed')
+
+npu_distributed_api = ['isend', 'irecv']
+
+distributed_func = {}
+for f in dir(dist):
+    distributed_func[f] = getattr(dist, f)
+
+
+def get_distributed_ops():
+    _all_distributed_ops = dir(dist)
+    return set(WrapDistributedOps) & set(_all_distributed_ops)
+
+
+class DistributedOPTemplate(nn.Module):
+    def __init__(self, op_name, hook):
+        super(DistributedOPTemplate, self).__init__()
+        self.op_name_ = op_name
+        self.prefix_op_name_ = str(op_name)
+        self.register_forward_hook(hook(), with_kwargs=True)
+
+    def forward(self, *args, **kwargs):
+        return distributed_func.get(self.op_name_)(*args, **kwargs)
+
+
+class ApiRegistry:
+    def __init__(self):
+        self.distributed_attr_origin = {}
+        self.distributed_attr_hooked = {}
+
+    @staticmethod
+    def store_ori_attr(ori_api_group, api_list, api_ori_attr):
+        for api in api_list:
+            if '.' in api:
+                sub_module_name, sub_op = api.rsplit('.', 1)
+                sub_module = getattr(ori_api_group, sub_module_name)
+                api_ori_attr[api] = getattr(sub_module, sub_op)
+            else:
+                api_ori_attr[api] = getattr(ori_api_group, api)
+
+    @staticmethod
+    def set_api_attr(api_group, attr_dict):
+        for cc_api_name, cc_api_entry_func in attr_dict.items():
+            if '.' in cc_api_name:
+                sub_module_name, sub_op = cc_api_name.rsplit('.', 1)
+                sub_module = getattr(api_group, sub_module_name, None)
+                if sub_module is not None:
+                    setattr(sub_module, sub_op, cc_api_entry_func)
+            else:
+                setattr(api_group, cc_api_name, cc_api_entry_func)
+
+    def redirect_api(self):
+        self.set_api_attr(dist, self.distributed_attr_hooked)
+        self.set_api_attr(dist.distributed_c10d, self.distributed_attr_hooked)
+
+    def restore_api(self):
+        self.set_api_attr(dist, self.distributed_attr_origin)
+        self.set_api_attr(dist.distributed_c10d, self.distributed_attr_origin)
+
+    def initialize_hook(self, hook):
+        self.store_ori_attr(dist, get_distributed_ops(), self.distributed_attr_origin)
+        for op_name in get_distributed_ops():
+            self.distributed_attr_hooked[op_name] = DistributedOPTemplate(op_name, hook)
+
+
+def get_callstack():
+    callstack = []
+    for (_, path, line, func, code, _) in inspect.stack():
+        stack_line = f'{path}[{line}]'
+        callstack.append(stack_line)
+    return callstack
+
+def op_aggregate(op, t1, t2):
+    if op == 'min':
+        return min(t1, t2)
+    if op == 'max':
+        return max(t1, t2)
+    if op == 'norm':
+        return (t1**2 + t2**2)**0.5
+    if op == 'zeros':  # TODO: running mean of ratios; inexact when tensor sizes differ
+        return (t1 + t2) / 2
+
+def update_data(old, new):
+    updated = {op: {} for op in new.keys()}
+    if old:
+        for op, tag2tensor in old.items():
+            for tag, t_old in tag2tensor.items():
+                t_new = new[op][tag]
+                updated[op][tag] = op_aggregate(op, t_old, t_new)
+    else:
+        updated = new
+    return updated
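+
+# Aggregation semantics across micro-batches (illustrative values):
+#   op_aggregate('max', 3.0, 5.0)   -> 5.0
+#   op_aggregate('norm', 3.0, 4.0)  -> 5.0   # sqrt(3**2 + 4**2)
+#   op_aggregate('zeros', 0.1, 0.3) -> 0.2   # mean of the two ratios (see TODO above)
+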
+def create_hook(context, monitor):
+    def cc_hook(module, args, kwargs, out=None):
+        args = args + tuple(kwargs.values())
+        if out:
+            out.wait()
+        # an empty module_rank_list means every rank is monitored
+        if dist.is_initialized() and monitor.module_rank_list and dist.get_rank() not in monitor.module_rank_list:
+            return out
+        stack = get_callstack()
+        whole_stack = ';'.join(stack)
+        is_target = monitor.cc_codeline == []
+        for pattern in monitor.cc_codeline:
+            if re.search(pattern, whole_stack):
+                is_target = True
+                break
+        if not is_target:
+            return out
+        tensor_args = {}
+        for arg in args:
+            if isinstance(arg, torch.Tensor):
+                tensor_args[f'input_{len(tensor_args)}'] = arg
+            elif isinstance(arg, list):
+                arg = torch.stack(arg)
+                tensor_args[f'input_{len(tensor_args)}'] = arg
+        new_data = {op: get_metrics(op, tensor_args, 1e-8) for op in monitor.ops}
+        context[module.prefix_op_name_].indata = update_data(context[module.prefix_op_name_].indata, new_data)
+        if out and isinstance(out, dist.Work):
+            tensor_res = {}
+            for res in out.result():
+                if isinstance(res, torch.Tensor):
+                    tensor_res[f'output_{len(tensor_res)}'] = res
+            new_data = {op: get_metrics(op, tensor_res, 1e-8) for op in monitor.ops}
+            context[module.prefix_op_name_].outdata = update_data(context[module.prefix_op_name_].outdata, new_data)
+        return out
+    return cc_hook
+
+api_register = ApiRegistry()
diff --git a/debug/accuracy_tools/kj600/kj600/features.py b/debug/accuracy_tools/kj600/kj600/features.py
index b4fc8f3085..be54215241 100644
--- a/debug/accuracy_tools/kj600/kj600/features.py
+++ b/debug/accuracy_tools/kj600/kj600/features.py
@@ -6,6 +6,26 @@ from torch.autograd.functional import jacobian
 def square_sum(x: torch.tensor):
     return (x * x).sum()

+@torch.no_grad()
+def get_min(x: torch.tensor):
+    return torch.min(x)
+
+
+@torch.no_grad()
+def get_max(x: torch.tensor):
+    return torch.max(x)
+
+
+@torch.no_grad()
+def get_zeros(x: torch.tensor, eps: float):
+    # fraction of elements whose magnitude is below eps
+    return torch.sum(torch.abs(x) < eps) / x.numel()
+
+@torch.no_grad()
+def get_sign_matches(x: torch.tensor, y: torch.tensor):
+    xs = x.sign()
+    ys = y.sign()
+    # maps the mean of xs*ys from [-1, 1] to a match ratio in [0, 1]
+    same_direction_ratio = ((xs * ys).sum() / ys.numel() + 1) / 2
+    return same_direction_ratio

 @torch.no_grad()
 def eff_rank(param: torch.tensor, threshold=1e-10):
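
As a quick illustration of the sign-match ratio behind mg_direction (values made up; signs agree on two of the four elements, so the ratio is 0.5):

```
import torch
from kj600.features import get_sign_matches

g = torch.tensor([1.0, -2.0, 3.0, -4.0])  # current gradient g_t
m = torch.tensor([1.0, 2.0, 3.0, 4.0])    # first moment m_{t-1}
print(get_sign_matches(g, m))  # tensor(0.5000)
```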
diff --git a/debug/accuracy_tools/kj600/kj600/module_hook.py b/debug/accuracy_tools/kj600/kj600/module_hook.py
index 233b000f88..8a35c646f5 100644
--- a/debug/accuracy_tools/kj600/kj600/module_hook.py
+++ b/debug/accuracy_tools/kj600/kj600/module_hook.py
@@ -1,25 +1,21 @@
 import os
 import uuid
+import json
 from collections import defaultdict
-from typing import List
 from datetime import datetime
+from functools import partial
 import torch
-from torch.nn.modules.module import register_module_forward_hook
 import torch.distributed as dist
 from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
-from torch.utils.tensorboard import SummaryWriter
-from kj600.features import square_sum
 from kj600.module_spec_verifier import get_config, validate_config_spec
-from kj600.optimizer_collect import MixPrecsionOptimizerMon, print_rank_0
-from kj600.features import eff_rank
+from kj600.optimizer_collect import MixPrecsionOptimizerMon, print_rank_0, OptimizerMonFactory, MegatronDistributedOptimizerMon
+from kj600.features import eff_rank, get_sign_matches
 from kj600.visualizer import HeatmapVisualizer
-
-
-def get_summary_writer_tag_name(module_or_param_name:str, tag:str, rank):
-    if rank is None:
-        return f"{module_or_param_name}/{tag}"
-    else:
-        return f"{module_or_param_name}/{rank}/{tag}"
+from kj600.anomaly_detect import AnomalyScanner, SummaryWriterWithAD
+from kj600.anomaly_inform import AnomalyInformFactory
+from kj600.module_metric import get_metrics, write_metrics_tensorboard, get_summary_writer_tag_name
+from kj600.distributed.wrap_distributed import api_register, create_hook
+from kj600.utils import print_warn_log, print_info_log, get_param_struct


 class ModuleHookContext:
@@ -47,41 +43,80 @@ class ModuleHookContext:
 class OptimizerContext:
     def __init__(self) -> None:
         self.step = 0
-        self.param_gnorm = defaultdict(float)            # norm of grad
-        self.param_exp_avg_norm = defaultdict(float)     # norm of expection of gradient average (m_{t-1})
-        self.param_exp_avg_sign = defaultdict(int)       # sign of expection of gradient average (m_{t-1})
-        self.param_mg_direction = defaultdict(float)     # ratio of parameters in same direction between g_{t} and m_{t-1}
-        self.param_exp_avg_sq_norm = defaultdict(float)  # norm of expection of gradient square (v_{t-1})
-        self.param_effective_rank = defaultdict(float)   # ratio of parameters above a threshold
-        self.param_adam_update = defaultdict()           # distribution of update (m_t/(v_t**0.5+eps))
-        self.param_adam_ratio = defaultdict()            # distribution of ratio (m_t/v_t**0.5)
+        self.param_effective_rank = defaultdict(float)
+        self.param_mg_direction = defaultdict(float)
+        self.param_adam_update = defaultdict()
+        self.param_adam_ratio = defaultdict()
+        self.param_weight_grad = defaultdict()
+        self.param_exp_avg = defaultdict()
+        self.param_exp_avg_sq = defaultdict()
+        self.metric_list = []
+
+class CommunicationContext:
+    def __init__(self) -> None:
+        self.indata = {}
+        self.outdata = {}
+
+    def reset(self):
+        self.indata = {}
+        self.outdata = {}


 class TrainerMon:
-
+
     @staticmethod
     def set_wrapped_optimizer(_wrapped_optimizer):
         MixPrecsionOptimizerMon.set_wrapped_optimizer(_wrapped_optimizer)

-    def __init__(self, config_file_path) -> None:
+    # opt_ty: "Megatron_Float16OptimizerWithFloat16Params" or "Megatron_DistributedOptimizer"
+    def __init__(self, config_file_path, params_have_main_grad=True, opt_ty=None) -> None:
         self.module_fwd_hook_context_by_module = defaultdict(ModuleHookContext)
         self.module_bwd_hook_context_by_module = defaultdict(ModuleHookContext)
         self.optimizer_context = defaultdict(OptimizerContext)
-        self.params_have_main_grad = True
+        self.cc_context = defaultdict(CommunicationContext)
+        self.params_have_main_grad = params_have_main_grad
         self.config = get_config(config_file_path)
-        self.module_rank_list = [int(rank) for rank in self.config.get("module_ranks", "").split(',') if rank.strip()]
+        self.module_rank_list = self.config.get("module_ranks", [])
+        self.eps = self.config.get('eps', 1e-8)
+        self.ops = self.config.get('ops', [])
+        self.xy_distribution = self.config.get('xy_distribution', False)
+        if not self.xy_distribution:
+            print_rank_0("> module input/output and input_grad/output_grad are not monitored.")
         self.ur_distribution = self.config.get('ur_distribution', False)
+        if not self.ur_distribution:
+            print_rank_0("> update and ratio vectors of adam are not monitored.")
+        self.mv_distribution = self.config.get("mv_distribution", False)
+        if not self.mv_distribution:
+            print_rank_0("> momentum and variance of adam are not monitored.")
+        self.wg_distribution = self.config.get("wg_distribution", False)
+        if not self.wg_distribution:
+            print_rank_0("> weight gradients of the specified modules are not monitored.")
") self.mg_direction = self.config.get('mg_direction', False) + if not self.mg_direction: + print_rank_0('> grad and momentum direction will not be compared.') + self.cc_distribution = self.config.get("cc_distribution", {}) + if not self.cc_distribution.get('enable', False): + print_rank_0("> cc operator is not monitored.") + else: + self.cc_codeline = self.cc_distribution.get('cc_codeline', []) + api_register.initialize_hook(partial(create_hook, context=self.cc_context, monitor=self)) + api_register.redirect_api() + alert_setting = self.config.get('alert', {"rules":[]}) + self.alert_rules = AnomalyScanner.load_rules(alert_setting["rules"]) + + anomaly_inform = AnomalyInformFactory.create_informer(**alert_setting["inform"]) if "inform" in alert_setting else None + self.optimizer_hooked = False output_base_dir = os.getenv('KJ600_OUTPUT_DIR', './kj600_output') cur_time = datetime.now().strftime('%b%d_%H-%M-%S') unique_id = str(uuid.uuid4())[:8] if dist.is_initialized(): if (dist.get_rank() in self.module_rank_list) or len(self.module_rank_list) == 0: - self.summary_writer = SummaryWriter(os.path.join(output_base_dir, f"{cur_time}-rank{dist.get_rank()}-{unique_id}")) + self.summary_writer = SummaryWriterWithAD( + os.path.join(output_base_dir, f"{cur_time}-rank{dist.get_rank()}-{unique_id}"), self.alert_rules, anomaly_inform) else: - self.summary_writer = SummaryWriter(os.path.join(output_base_dir, f"{cur_time}-{unique_id}")) + self.summary_writer = SummaryWriterWithAD(os.path.join(output_base_dir, f"{cur_time}-{unique_id}"), self.alert_rules, anomaly_inform) # A HeatmapVisualizer instance is associated with an image self.update_heatmap_visualizer = defaultdict(HeatmapVisualizer) self.ratio_heatmap_visualizer = defaultdict(HeatmapVisualizer) @@ -90,21 +125,38 @@ class TrainerMon: self.param_name_list = [] self.param2name = defaultdict(str) - self.mix_precision_optimizer_mon = MixPrecsionOptimizerMon() + self.mix_precision_optimizer_mon = OptimizerMonFactory.create_optimizer_mon(opt_ty) + if opt_ty is None: + assert not self.ur_distribution, "ur_distribution cannot be enabled with unknown optimizer." + assert not self.mv_distribution, "mv_distribution cannot be enabled with unknown optimizer." 
+        self.print_struct = self.config.get("print_struct", False)
+        self.module_struct = {}
         return
-
+
     def __del__(self):
         if hasattr(self, "summary_writer"):
             self.summary_writer.close()

-    def _hook_module(self, target_name:str, module: torch.nn.Module, fwd_or_bkd):
-        paths = target_name.split('.')
+    def _smallest_rank_print(self, msg):
+        if dist.is_initialized():
+            # an empty module_rank_list falls back to rank 0, so min() never sees an empty list
+            if dist.get_rank() == (min(self.module_rank_list) if self.module_rank_list else 0):
+                print_info_log(msg)
+        else:
+            print_info_log(msg)
+
+    def _hook_module(self, target_names, module: torch.nn.Module, fwd_or_bkd):
         if '_modules' not in module.__dict__:
             # nothing to hook
             return 0
-
+
         def fwd_hook_fun(module, module_input, module_output):
-            context = self.module_fwd_hook_context_by_module[module]
+            context: ModuleHookContext = self.module_fwd_hook_context_by_module[module]
+            if self.print_struct:
+                self.module_struct[context.module_name].update(
+                    {"input": f"{get_param_struct(module_input)}", "output": f"{get_param_struct(module_output)}"})
+                return
+            if not self.xy_distribution:
+                return
             if not context.format_by_arg:
                 context.set_format_by_arg('input', self.config['targets'])
                 context.set_format_by_arg('output', self.config['targets'])
@@ -114,22 +166,35 @@ class TrainerMon:
                 context.focused_out_col = validate_config_spec(context.format_by_arg['output'], module_output, context.module_name, 'output')
                 context.verified = True
             # expect output be tensor type
+            tbtag_tensor_map = {}
             if not context.ignore_in:
                 cared_input = module_input if context.focused_in_col is None else module_input[context.focused_in_col]
-                cared_input_cal_result = square_sum(cared_input)
-            else:
-                cared_input_cal_result = None
+                tbtag_tensor_map.update(self.build_tbtag_tensor_map(context.module_name, 'input', cared_input))
             cared_output = module_output if context.focused_out_col is None else module_output[context.focused_out_col]
-            context.actv.append((cared_input_cal_result, square_sum(cared_output)))
+            tbtag_tensor_map.update(self.build_tbtag_tensor_map(context.module_name, 'output', cared_output))
+            metric_dict = {}
+            for metric_name in self.ops:
+                metric_dict[metric_name] = get_metrics(metric_name, tbtag_tensor_map, self.eps)
+            if context.micro_step == 0 and context.actv:
+                print_warn_log(
+                    f"actv context of {context.module_name} is not empty at the first micro_step; something may have gone wrong. Clearing it now.")
+                context.actv.clear()
+            context.actv.append(metric_dict)
             context.micro_step += 1
             if context.micro_step == self.micro_batch_number:
                 context.micro_step = 0
                 context.step += 1
             return
-
+
         def bwd_hook_fun(module, input_grad, output_grad):
-            context = self.module_bwd_hook_context_by_module[module]
+            context: ModuleHookContext = self.module_bwd_hook_context_by_module[module]
+            if self.print_struct:
+                self.module_struct[context.module_name].update(
+                    {"input_grad": f"{get_param_struct(input_grad)}", "output_grad": f"{get_param_struct(output_grad)}"})
+                return
+            if not self.xy_distribution:
+                return
             if not context.format_by_arg:
                 context.set_format_by_arg('input_grad', self.config['targets'])
                 context.set_format_by_arg('output_grad', self.config['targets'])
@@ -138,44 +203,53 @@ class TrainerMon:
                 context.focused_in_col = validate_config_spec(context.format_by_arg['input_grad'], input_grad, context.module_name, 'input_grad')
                 context.focused_out_col = validate_config_spec(context.format_by_arg['output_grad'], output_grad, context.module_name, 'output_grad')
                 context.verified = True
+
+            tbtag_tensor_map = {}
             if not context.ignore_in:
                 cared_input_grad = input_grad if context.focused_in_col is None else input_grad[context.focused_in_col]
-                cared_input_grad_cal_result = square_sum(cared_input_grad) if cared_input_grad is not None else torch.tensor(0.)
-            else:
-                cared_input_grad_cal_result = None
+                tbtag_tensor_map.update(self.build_tbtag_tensor_map(context.module_name, 'input_grad', cared_input_grad))
             cared_output_grad = output_grad if context.focused_out_col is None else output_grad[context.focused_out_col]
-            context.actvgrad.append((cared_input_grad_cal_result, square_sum(cared_output_grad)))
+            tbtag_tensor_map.update(self.build_tbtag_tensor_map(context.module_name, 'output_grad', cared_output_grad))
+            metric_dict = {}
+            for metric_name in self.ops:
+                metric_dict[metric_name] = get_metrics(metric_name, tbtag_tensor_map, self.eps)
+            if context.micro_step == 0 and context.actvgrad:
+                print_warn_log(f"actvgrad context of {context.module_name} is not empty at the first micro_step; something may have gone wrong. Clearing it now.")
+                context.actvgrad.clear()
+            context.actvgrad.append(metric_dict)
+            context.micro_step += 1
             if context.micro_step == self.micro_batch_number:
                 context.micro_step = 0
                 context.step += 1
             return
-
+
+        hooked_count = 0
         for name, submodule in module.named_modules():
-            if name == target_name:
+            self.module_struct[name] = {}
+            if name in target_names:
                 submodule.register_forward_hook(fwd_hook_fun)
                 self.module_fwd_hook_context_by_module[submodule] = ModuleHookContext(name)
                 submodule.register_full_backward_hook(bwd_hook_fun)
                 self.module_bwd_hook_context_by_module[submodule] = ModuleHookContext(name)
                 print_rank_0(f"> {name} is monitored successfully")
-                return 1
-        return 0
+                hooked_count += 1
+        return hooked_count

-    def hook_modules(self, model:torch.nn.Module, global_batch_size, dp, micro_batch_size, fwd_or_bkd, params_have_main_grad=True):
+    def hook_modules(self, model:torch.nn.Module, grad_acc_steps):
         # fwd=0, bkd=1
         # targets is a module-name list like ["xx.xxx1", "xxx.xxx2"], which can be obtained on a first run.
         print_rank_0("> module names:")
         for name, _ in model.named_modules():
             print_rank_0(f"\t{name}")
-        self.micro_batch_number = global_batch_size // dp // micro_batch_size
-
+        self.micro_batch_number = grad_acc_steps
+
         if not self.module_rank_list or (dist.is_initialized() and dist.get_rank() in self.module_rank_list):
-            hooked = 0
-            for target, _ in self.config['targets'].items():
-                hooked += self._hook_module(target, model, fwd_or_bkd=0)
-            print_rank_0(f"> {hooked} out of {len(self.config['targets'])} are monitored.")
+            targets = [x for x, _ in model.named_modules()] if self.print_struct else self.config['targets'].keys()
+            hooked_count = self._hook_module(targets, model, fwd_or_bkd=0)
+            print_rank_0(f"> {hooked_count} out of {len(self.config['targets'])} are monitored.")
         else:
-            return
+            return

         if not self.optimizer_hooked:
             self.optimizer_hooked = True
             self.param_name_list.append(name)
             self.param2name[param] = name
         self.hook_optimizer()
-        self.params_have_main_grad = params_have_main_grad
         return
+
+    def build_tbtag_tensor_map(self, module_name, tag, tensor):
+        metrics = {}
+        rank = dist.get_rank() if dist.is_initialized() else None
+        key = get_summary_writer_tag_name(module_name, tag, rank)
+        if tensor is not None:
+            metrics[key] = tensor
+        return metrics
+
+    def generate_param_metrics(self, tag, param_tensor):
+        metrics = {}
+        rank = dist.get_rank() if dist.is_initialized() else None
+        for name in self.param2name.values():
+            key = get_summary_writer_tag_name(name, tag, rank)
+            if name not in param_tensor or param_tensor[name] is None:
+                continue
+            metrics[key] = param_tensor[name]
+        return metrics
+
+    def generate_cc_metrics(self, cc_name, cc_tensor):
+        metrics = defaultdict(dict)
+        rank = dist.get_rank() if dist.is_initialized() else None
+        for op, tag2tensor in cc_tensor.indata.items():
+            for tag, tensor in tag2tensor.items():
+                key = get_summary_writer_tag_name(cc_name, tag, rank)
+                metrics[op].update({key: tensor})
+        for op, tag2tensor in cc_tensor.outdata.items():
+            for tag, tensor in tag2tensor.items():
+                key = get_summary_writer_tag_name(cc_name, tag, rank)
+                metrics[op].update({key: tensor})
+        cc_tensor.reset()
+        return metrics
+
+    def write_xy_tb(self, step):
+        if not self.xy_distribution:
+            return
+        for _, fwd_context in self.module_fwd_hook_context_by_module.items():
+            if not len(fwd_context.actv) == self.micro_batch_number:
+                print_warn_log(f"length of fwd_context.actv ({len(fwd_context.actv)}) does not match micro_batch_number ({self.micro_batch_number})")
+            for metric_name in self.ops:
+                write_metrics_tensorboard(metric_name, self.summary_writer, fwd_context.actv, step)
+            fwd_context.actv.clear()
+
+        for _, bwd_context in self.module_bwd_hook_context_by_module.items():
+            if not len(bwd_context.actvgrad) == self.micro_batch_number:
+                print_warn_log(f"length of bwd_context.actvgrad ({len(bwd_context.actvgrad)}) does not match micro_batch_number ({self.micro_batch_number})")
+            for metric_name in self.ops:
+                write_metrics_tensorboard(metric_name, self.summary_writer, bwd_context.actvgrad, step)
+            bwd_context.actvgrad.clear()
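+
+    # Tag layout used by the metric builders above, via get_summary_writer_tag_name
+    # (illustrative): ("layers.0", "input", rank=2) -> "layers.0/2/input";
+    # with rank=None the rank segment is omitted -> "layers.0/input".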
-            context.param_exp_avg_norm, context.param_exp_avg_sign, context.param_exp_avg_sq_norm, context.param_adam_update, context.param_adam_ratio = self.mix_precision_optimizer_mon.fetch_mv(
-                optimizer, self.param2name, self.update_heatmap_visualizer, self.ratio_heatmap_visualizer, self.ur_distribution, self.mg_direction)
+            context.param_exp_avg, context.param_exp_avg_sq, context.param_adam_update, context.param_adam_ratio = self.mix_precision_optimizer_mon.fetch_mv(self,
+                optimizer, self.param2name)
             for param, name in self.param2name.items():
-                grad = param.main_grad if self.params_have_main_grad else param.grad
-                context.param_gnorm[name] = grad.detach().norm()
                 if "params_effrank" in self.config and name in self.config["params_effrank"]:
                     context.param_effective_rank[name] = eff_rank(param.detach())
-
+                grad = param.main_grad if self.params_have_main_grad else param.grad
+                if grad is None:
+                    print_warn_log(f"grad is None: {name}, maybe something wrong happened.")
+                    continue
+                if self.wg_distribution:
+                    context.param_weight_grad[name] = grad
                 if self.mg_direction:
-                    if name in context.param_exp_avg_sign:
-                        g_sign = grad.detach().sign()
-                        m_sign = context.param_exp_avg_sign.pop(name)
-                        same_direction_ratio = ((m_sign * g_sign).sum().item()/m_sign.numel() + 1)/2
+                    if context.step == 0:
+                        same_direction_ratio = torch.tensor(1.)
                     else:
-                        same_direction_ratio = 1
+                        same_direction_ratio = get_sign_matches(grad, context.param_exp_avg[name])
                     context.param_mg_direction[name] = same_direction_ratio
+            tbtag_tensor_map = {}
+            if self.wg_distribution:
+                tbtag_tensor_map.update(self.generate_param_metrics('weight_grad', context.param_weight_grad))
+            if self.mv_distribution:
+                tbtag_tensor_map.update(self.generate_param_metrics('exp_avg', context.param_exp_avg))
+                tbtag_tensor_map.update(self.generate_param_metrics('exp_avg_sq', context.param_exp_avg_sq))
+            if self.mg_direction:
+                tbtag_tensor_map.update(self.generate_param_metrics('mg_direction', context.param_mg_direction))
+            # if not tbtag_tensor_map:
+            #     return
+            metric_dict = {}
+            for metric_name in self.ops:
+                metric_dict[metric_name] = get_metrics(metric_name, tbtag_tensor_map, self.eps)
+            if self.cc_distribution:
+                for k, c in self.cc_context.items():
+                    cc_metrics = self.generate_cc_metrics(k, c)
+                    for op, m in cc_metrics.items():
+                        metric_dict[op].update(m)
+            if not metric_dict:
+                return
+            context.metric_list.append(metric_dict)
             return
-
+
         def optimizer_post_step_hook(optimizer, args, kwargs):
             context = self.optimizer_context[optimizer]
             rank = dist.get_rank() if dist.is_initialized() else None
-            for _, fwd_context in self.module_fwd_hook_context_by_module.items():
-                if not len(fwd_context.actv) == self.micro_batch_number:
-                    raise Exception(f"fwd_context.actv not equal to micro_batch_number: {len(fwd_context.actv)}, {self.micro_batch_number}")
-                if not fwd_context.ignore_in:
-                    x_norm = sum([x.item() for x, _ in fwd_context.actv])
-                    self.summary_writer.add_scalar(get_summary_writer_tag_name(fwd_context.module_name, 'input', rank), x_norm, context.step)
-                    y_norm = sum([y.item() for _, y in fwd_context.actv])
-                    self.summary_writer.add_scalar(get_summary_writer_tag_name(fwd_context.module_name, 'output', rank), y_norm, context.step)
-                fwd_context.actv.clear()
-
-            for _, bwd_context in self.module_bwd_hook_context_by_module.items():
-                if not len(bwd_context.actvgrad) == self.micro_batch_number:
-                    raise Exception(f"fwd_context.actvgrad not equal to micro_batch_number: {len(fwd_context.actvgrad)}, {self.micro_batch_number}")
-                if not bwd_context.ignore_in:
-                    x_grad_norm = sum([x.item() for x, _ in bwd_context.actvgrad])
-                    self.summary_writer.add_scalar(get_summary_writer_tag_name(bwd_context.module_name, 'input_grad', rank), x_grad_norm, context.step)
-                    y_grad_norm = sum([y.item() for _, y in bwd_context.actvgrad])
-                    self.summary_writer.add_scalar(get_summary_writer_tag_name(bwd_context.module_name, 'output_grad', rank), y_grad_norm, context.step)
-                bwd_context.actvgrad.clear()
-
-            for param_name, grad_norm in context.param_gnorm.items():
-                self.summary_writer.add_scalar(get_summary_writer_tag_name(param_name, 'weight_grad', rank), grad_norm.item(), context.step)
-
-            for param_name, exp_avg_norm in context.param_exp_avg_norm.items():
-                self.summary_writer.add_scalar(get_summary_writer_tag_name(param_name, 'exp_avg_norm', rank), exp_avg_norm.item(), context.step)
-            for param_name, exp_avg_sq_norm in context.param_exp_avg_sq_norm.items():
-                self.summary_writer.add_scalar(get_summary_writer_tag_name(param_name, 'exp_avg_sq_norm', rank), exp_avg_sq_norm.item(), context.step)
+
+            self.write_xy_tb(context.step)
+
             if self.ur_distribution:
                 for param_name, _ in context.param_adam_update.items():
                     self.update_heatmap_visualizer[param_name].visualize(get_summary_writer_tag_name(param_name, 'adam_update', rank), context.step, self.summary_writer)
                 for param_name, _ in context.param_adam_ratio.items():
                     self.ratio_heatmap_visualizer[param_name].visualize(get_summary_writer_tag_name(param_name, 'adam_ratio', rank), context.step, self.summary_writer)
-            if self.mg_direction:
-                for param_name, mg_direction in context.param_mg_direction.items():
-                    self.summary_writer.add_scalar(get_summary_writer_tag_name(param_name, 'adam_mg_direction', rank), mg_direction, context.step)
+
+            for metric_name in self.ops:
+                if not context.metric_list:
+                    break
+                write_metrics_tensorboard(metric_name, self.summary_writer, context.metric_list, context.step)
+            context.metric_list.clear()
             context.step += 1
             return
diff --git a/debug/accuracy_tools/kj600/kj600/module_metric.py b/debug/accuracy_tools/kj600/kj600/module_metric.py
new file mode 100644
index 0000000000..d42749d2be
--- /dev/null
+++ b/debug/accuracy_tools/kj600/kj600/module_metric.py
@@ -0,0 +1,125 @@
+import math
+import statistics
+
+from kj600.features import square_sum, get_max, get_min, get_zeros
+
+
+def get_summary_writer_tag_name(module_or_param_name:str, tag:str, rank):
+    if rank is None:
+        return f"{module_or_param_name}/{tag}"
+    else:
+        return f"{module_or_param_name}/{rank}/{tag}"
+
+
+# Registry that stores every Metric implementation class
+config_metric_registry = {}
+
+
+def register_config_metric(key, cls=None):
+    """Decorator used to register a Metric implementation class."""
+    if cls is None:
+        # called with the key only, return the actual decorator
+        return lambda cls: register_config_metric(key, cls)
+    config_metric_registry[key] = cls
+    return cls
+
+
+class Metric(object):
+    @staticmethod
+    def get_metric_value(tensor, eps):
+        pass
+
+    def get_metrics(self, tag2tensor: dict, eps):
+        metrics_dict = {}
+        for tag, tensor in tag2tensor.items():
+            metrics_dict[tag] = self.get_metric_value(tensor, eps)
+        return metrics_dict
+
+    @staticmethod
+    def metric_tensorboard(metric_name, summary_writer, metric_value, step):
+        pass
+
+
+@register_config_metric("min")
+class MinMetric(Metric):
+    @staticmethod
+    def get_metric_value(tensor, eps):
+        return get_min(tensor)
+
+    @staticmethod
+    def metric_tensorboard(metric_name, summary_writer, metric_value, step):
+        for key in metric_value[0][metric_name].keys():
+            min_value = min([item[metric_name][key].item() for item in metric_value])
+            summary_writer.add_scalar(f'{key}_min', min_value, step)
+
+
+@register_config_metric("max")
+class MaxMetric(Metric):
+    @staticmethod
+    def get_metric_value(tensor, eps):
+        return get_max(tensor)
+
+    @staticmethod
+    def metric_tensorboard(metric_name, summary_writer, metric_value, step):
+        for key in metric_value[0][metric_name].keys():
+            max_value = max([item[metric_name][key].item() for item in metric_value])
+            summary_writer.add_scalar(f'{key}_max', max_value, step)
+
+
+@register_config_metric("norm")
+class NormMetric(Metric):
+    @staticmethod
+    def get_metric_value(tensor, eps):
+        return square_sum(tensor)
+
+    @staticmethod
+    def metric_tensorboard(metric_name, summary_writer, metric_value, step):
+        for key in metric_value[0][metric_name].keys():
+            norm_value = math.sqrt(sum([item[metric_name][key].item() for item in metric_value]))
+            summary_writer.add_scalar(f'{key}_norm', norm_value, step)
+
+
+@register_config_metric("zeros")
+class ZerosMetric(Metric):
+    @staticmethod
+    def get_metric_value(tensor, eps):
+        return get_zeros(tensor, eps)
+
+    @staticmethod
+    def metric_tensorboard(metric_name, summary_writer, metric_value, step):
+        for key in metric_value[0][metric_name].keys():
+            zeros_value = statistics.mean([item[metric_name][key].item() for item in metric_value])
+            summary_writer.add_scalar(f'{key}_zeros', zeros_value, step)
+
+
+@register_config_metric("id")
+class IdentMetric(Metric):
+    @staticmethod
+    def get_metric_value(tensor, eps):
+        if tensor.dim() != 0:
+            return None
+        return tensor
+
+    @staticmethod
+    def metric_tensorboard(metric_name, summary_writer, metric_value, step):
+        # metric_value is a list with one {metric_name: {tag: 0-dim tensor}} dict per step
+        if len(metric_value) == 1:
+            for key, value in metric_value[0][metric_name].items():
+                if value is None:
+                    continue
+                summary_writer.add_scalar(f'{key}_identical', value.item(), step)
+
+
+def get_metrics(metric_name, tag2tensor, eps):
+    try:
+        fun_metric = config_metric_registry[metric_name]
+        return fun_metric().get_metrics(tag2tensor, eps)
+    except KeyError as e:
+        raise ValueError(f"Unsupported metric '{metric_name}'; expected one of {list(config_metric_registry.keys())}") from e
+
+
+def write_metrics_tensorboard(metric_name, summary_writer, metric_value, step):
+    try:
+        fun_metric = config_metric_registry[metric_name]
+        return fun_metric.metric_tensorboard(metric_name, summary_writer, metric_value, step)
+    except KeyError as e:
+        raise ValueError(f"Unsupported metric '{metric_name}'; expected one of {list(config_metric_registry.keys())}") from e
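For anyone extending module_metric.py, here is a minimal sketch of how the registry above is meant to be used; the `MeanMetric` below is hypothetical and not part of this patch:

```python
import statistics

from kj600.module_metric import Metric, register_config_metric

@register_config_metric("mean")
class MeanMetric(Metric):
    @staticmethod
    def get_metric_value(tensor, eps):
        # reduce one monitored tensor to a 0-dim statistic
        return tensor.float().mean()

    @staticmethod
    def metric_tensorboard(metric_name, summary_writer, metric_value, step):
        # metric_value is a list (one entry per micro step) of
        # {metric_name: {tag: 0-dim tensor}} dicts
        for key in metric_value[0][metric_name].keys():
            value = statistics.mean([item[metric_name][key].item() for item in metric_value])
            summary_writer.add_scalar(f'{key}_mean', value, step)
```

Once registered, adding "mean" to the "ops" list in the config file would route every monitored tensor through it.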
diff --git a/debug/accuracy_tools/kj600/kj600/optimizer_collect.py b/debug/accuracy_tools/kj600/kj600/optimizer_collect.py
index 44f478416c..dfb473ca07 100644
--- a/debug/accuracy_tools/kj600/kj600/optimizer_collect.py
+++ b/debug/accuracy_tools/kj600/kj600/optimizer_collect.py
@@ -22,20 +22,10 @@ class MixPrecsionOptimizerMon:
     def __init__(self) -> None:
         self.fp16_to_fp32_param = {}
-
-    # parameter tensors we want to monitor and their names are in params2name_dict
-    # base_optimizer is pytorch optimizer, wrapped_optimizer is a normal object with base_optimizer
-    def fetch_mv(self, torch_opt, params2name, update_heatmap_visualizer, ratio_heatmap_visualizer, ur_distribution, mg_direction):
-        mix_prec_opt = MixPrecsionOptimizerMon.wrapped_optimizer
-
-        if not self.fp16_to_fp32_param and mix_prec_opt is not None:
-            for fp16_group, fp32_group in zip(mix_prec_opt.float16_groups, mix_prec_opt.fp32_from_float16_groups):
-                for fp16_param, fp32_param in zip(fp16_group, fp32_group):
-                    self.fp16_to_fp32_param[fp16_param] = fp32_param
-
-        exp_avg_norm_dict = defaultdict(float)
-        exp_avg_sign_dict = defaultdict(int)
-        exp_avg_sq_norm_dict = defaultdict(float)
+    def _fetch_mv_in_adam(self, params2name, torch_opt, monitor):
+        exp_avg_dict = defaultdict(float)
+        exp_avg_sq_dict = defaultdict(float)
         update_dict = defaultdict()
         ratio_dict = defaultdict()
@@ -46,16 +36,52 @@ class MixPrecsionOptimizerMon:
             if param in torch_opt.state:
                 exp_avg = torch_opt.state[param]["exp_avg"]
                 exp_avg_sq = torch_opt.state[param]["exp_avg_sq"]
-                exp_avg_norm = exp_avg.detach().norm()
-                exp_avg_sq_norm = exp_avg_sq.detach().norm()
-                exp_avg_norm_dict[name] = exp_avg_norm
-                exp_avg_sq_norm_dict[name] = exp_avg_sq_norm
-                if mg_direction:
-                    exp_avg_sign_dict[name] = exp_avg.detach().sign()
-                if ur_distribution:
+                if monitor.mv_distribution:
+                    exp_avg_dict[name] = exp_avg
+                    exp_avg_sq_dict[name] = exp_avg_sq
+                if monitor.mg_direction:
+                    exp_avg_dict[name] = exp_avg
+                if monitor.ur_distribution:
                     update_dict[name] = exp_avg / (torch.sqrt(exp_avg_sq) + torch_opt.defaults['eps'])
                     ratio_dict[name] = exp_avg / torch.sqrt(exp_avg_sq)
-                    update_heatmap_visualizer[name].pre_cal(update_dict[name])
-                    ratio_heatmap_visualizer[name].pre_cal(ratio_dict[name])
-
-        return exp_avg_norm_dict, exp_avg_sign_dict, exp_avg_sq_norm_dict, update_dict, ratio_dict
+                    monitor.update_heatmap_visualizer[name].pre_cal(update_dict[name])
+                    monitor.ratio_heatmap_visualizer[name].pre_cal(ratio_dict[name])
+        return exp_avg_dict, exp_avg_sq_dict, update_dict, ratio_dict
+
+    # parameter tensors we want to monitor and their names are in params2name_dict
+    # base_optimizer is pytorch optimizer, wrapped_optimizer is a normal object with base_optimizer
+    def fetch_mv(self, monitor, torch_opt, params2name):
+        mix_prec_opt = MixPrecsionOptimizerMon.wrapped_optimizer
+
+        if not self.fp16_to_fp32_param and mix_prec_opt is not None:
+            for fp16_group, fp32_group in zip(mix_prec_opt.float16_groups, mix_prec_opt.fp32_from_float16_groups):
+                for fp16_param, fp32_param in zip(fp16_group, fp32_group):
+                    self.fp16_to_fp32_param[fp16_param] = fp32_param
+
+        return self._fetch_mv_in_adam(params2name, torch_opt, monitor)
+
+class MegatronDistributedOptimizerMon(MixPrecsionOptimizerMon):
+    def fetch_mv(self, monitor, torch_opt, params2name):
+        mix_prec_opt = MixPrecsionOptimizerMon.wrapped_optimizer
+        assert hasattr(mix_prec_opt, "model_float16_groups") and hasattr(mix_prec_opt, "shard_fp32_from_float16_groups"), \
+            "megatron distributed optimizer should have model_float16_groups and shard_fp32_from_float16_groups, if not, please check megatron-lm version"
+        if not self.fp16_to_fp32_param and mix_prec_opt is not None:
+            for fp16_group, shard_fp32_group in zip(mix_prec_opt.model_float16_groups, mix_prec_opt.shard_fp32_from_float16_groups):
+                for fp16_param, shard_fp32_param in zip(fp16_group, shard_fp32_group):
+                    self.fp16_to_fp32_param[fp16_param] = shard_fp32_param
+
+        return self._fetch_mv_in_adam(params2name, torch_opt, monitor)
+
+class DummyOptimizerMon(MixPrecsionOptimizerMon):
+    def fetch_mv(self, monitor, torch_opt, params2name):
+        return None, None, None, None
+
+class OptimizerMonFactory:
+    @staticmethod
+    def create_optimizer_mon(opt_ty:str):
+        if opt_ty == "Megatron_Float16OptimizerWithFloat16Params":
+            return MixPrecsionOptimizerMon()
+        if opt_ty == "Megatron_DistributedOptimizer":
+            return MegatronDistributedOptimizerMon()
+        if opt_ty is None or opt_ty == "unknown":
+            return DummyOptimizerMon()
+        raise ValueError("opt_ty should be Megatron_Float16OptimizerWithFloat16Params, Megatron_DistributedOptimizer, None or unknown, got " + repr(opt_ty))
\ No newline at end of file
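A brief usage sketch of the factory above; the opt_ty strings are the ones the factory recognizes, and DummyOptimizerMon is the fallback that skips optimizer-state collection:

```python
from kj600.optimizer_collect import OptimizerMonFactory

# Megatron fp16 optimizer that keeps fp32 master weights
mon = OptimizerMonFactory.create_optimizer_mon("Megatron_Float16OptimizerWithFloat16Params")

# no known wrapped optimizer: fetch_mv degrades to returning four Nones
dummy = OptimizerMonFactory.create_optimizer_mon("unknown")
assert dummy.fetch_mv(None, None, None) == (None, None, None, None)
```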
diff --git a/debug/accuracy_tools/kj600/kj600/unittest/config_1.json b/debug/accuracy_tools/kj600/kj600/unittest/config_1.json
deleted file mode 100644
index a3b10f731d..0000000000
--- a/debug/accuracy_tools/kj600/kj600/unittest/config_1.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "targets": {
-        "fc": {"input": "tuple[1]:0", "output": "tensor", "input_grad":"tuple[1]:0", "output_grad":"tuple[1]:0"},
-        "relu": {"input": "tuple[1]:0", "output": "tensor", "input_grad":"tuple[1]:0", "output_grad":"tuple[1]:0"}
-    },
-    "ur_distribution": true,
-    "mg_direction": true
-}
\ No newline at end of file
diff --git a/debug/accuracy_tools/kj600/kj600/unittest/test_features.py b/debug/accuracy_tools/kj600/kj600/unittest/test_features.py
deleted file mode 100644
index bc8c6dd71a..0000000000
--- a/debug/accuracy_tools/kj600/kj600/unittest/test_features.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import unittest
-import torch
-import torch.nn as nn
-import torch_npu
-from kj600.features import eff_rank
-
-
-class TestFeatureCalculation(unittest.TestCase):
-    def test_effective_rank(self):
-        param = torch.randn(10, 10).npu()
-        rank = eff_rank(param)
-        self.assertTrue(rank.item() >= 1)
-
-    def test_lambda_max(self):
-        pass
-
-
-if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
diff --git a/debug/accuracy_tools/kj600/kj600/unittest/test_module_hook.py b/debug/accuracy_tools/kj600/kj600/unittest/test_module_hook.py
deleted file mode 100644
index f077fc7004..0000000000
--- a/debug/accuracy_tools/kj600/kj600/unittest/test_module_hook.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import argparse
-import torch_npu
-import torch
-import torch.nn.functional as F
-from kj600.module_hook import TrainerMon  # Modify PYTHONPATH to import TrainerMon
-
-from torch.npu.amp import GradScaler
-
-print(torch_npu.__version__)
-
-parser = argparse.ArgumentParser(prog="kj600 debug", description="kj600 sample code", epilog="")
-parser.add_argument("-o", "--out_dir", type=str, default=".")
-args = parser.parse_args()
-DTYPE = torch.float32
-
-
-class Model(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.fc = torch.nn.Linear(784, 10, dtype=DTYPE)
-        self.relu = torch.nn.ReLU()
-
-    def forward(self, x):
-        return self.relu(self.fc(x).type(DTYPE))
-
-npu = torch.device('npu:0')
-net = Model().to(device=npu)
-
-config = {
-    "targets": {
-        "fc": {"input": "tuple[2]:0", "output": "tensor::"},
-        "relu": {"input": "..", "output": ".."}
-    }
-}
-optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
-
-hooker = TrainerMon('./kj600/unittest/config_1.json')
-hooker.hook_modules(model=net, global_batch_size=2, dp=1, micro_batch_size=2, fwd_or_bkd=0, params_have_main_grad=False)
-
-
-class ToyDataset(torch.utils.data.Dataset):
-    def __init__(self):
-        self.data = torch.randn(16, 784, dtype=DTYPE, requires_grad=True)
-        self.labels = torch.randint(low=0, high=9, size=(16,))
-
-    def __len__(self):
-        return len(self.labels)
-
-    def __getitem__(self, idx):
-        return self.data[idx].to(npu), self.labels[idx].to(npu)
-
-train_ds = ToyDataset()
-train_loader = torch.utils.data.DataLoader(train_ds, shuffle=True, batch_size=2)
-
-
-for (inputs, labels) in train_loader:
-    optimizer.zero_grad()
-    outputs = net(inputs)
-    loss = F.cross_entropy(outputs, labels)
-
-    loss.backward()
-    optimizer.step()
diff --git a/debug/accuracy_tools/kj600/kj600/utils.py b/debug/accuracy_tools/kj600/kj600/utils.py
new file mode 100644
index 0000000000..fae87693e0
--- /dev/null
+++ b/debug/accuracy_tools/kj600/kj600/utils.py
@@ -0,0 +1,47 @@
+import os
+import time
+import sys
+
+
+def _print_log(level, msg, end='\n'):
+    current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
+    pid = os.getpid()
+    print(current_time + "(" + str(pid) + ")-[" + level + "] " + msg, end=end)
+    sys.stdout.flush()
+
+
+def print_info_log(info_msg, end='\n'):
+    """
+    Function Description:
+        print info log.
+    Parameter:
+        info_msg: the info message.
+    """
+    _print_log("INFO", info_msg, end=end)
+
+
+def print_error_log(error_msg):
+    """
+    Function Description:
+        print error log.
+    Parameter:
+        error_msg: the error message.
+    """
+    _print_log("ERROR", error_msg)
+
+
+def print_warn_log(warn_msg):
+    """
+    Function Description:
+        print warn log.
+    Parameter:
+        warn_msg: the warning message.
+    """
+    _print_log("WARNING", warn_msg)
+
+def get_param_struct(param):
+    if isinstance(param, tuple):
+        return f"tuple[{len(param)}]"
+    if isinstance(param, list):
+        return f"list[{len(param)}]"
+    return "tensor"
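A quick sketch of how these helpers behave: get_param_struct emits the same tuple[n] / list[n] / tensor notation that the "targets" entries in the config use, and the log helpers prefix each message with a timestamp, process id and level (the tensor values below are arbitrary):

```python
import torch

from kj600.utils import get_param_struct, print_warn_log

x = torch.randn(2, 3)
print(get_param_struct((x, None)))  # tuple[2]
print(get_param_struct([x]))        # list[1]
print(get_param_struct(x))          # tensor

print_warn_log("grad is None: layer.weight")
# e.g. "2024-06-04 10:54:27(1234)-[WARNING] grad is None: layer.weight"
```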
+ """ + _print_log("WARNING", warn_msg) + +def get_param_struct(param): + if isinstance(param, tuple): + return f"tuple[{len(param)}]" + if isinstance(param, list): + return f"list[{len(param)}]" + return "tensor" diff --git "a/debug/accuracy_tools/kj600/\350\256\255\347\273\203\347\212\266\346\200\201\347\233\221\346\216\247\345\267\245\345\205\267\346\200\247\350\203\275\345\237\272\347\272\277.md" "b/debug/accuracy_tools/kj600/\350\256\255\347\273\203\347\212\266\346\200\201\347\233\221\346\216\247\345\267\245\345\205\267\346\200\247\350\203\275\345\237\272\347\272\277.md" new file mode 100644 index 0000000000..90461fa5c8 --- /dev/null +++ "b/debug/accuracy_tools/kj600/\350\256\255\347\273\203\347\212\266\346\200\201\347\233\221\346\216\247\345\267\245\345\205\267\346\200\247\350\203\275\345\237\272\347\272\277.md" @@ -0,0 +1,52 @@ +# ptdbg_ascend精度工具标准性能基线报告 + +## 环境信息 + +NPU:Atlas A2 训练系列产品 + +CPU: + +![输入图片说明](img/cpu_info.png) + +Torch:2.1.0 + +CANN:8.0.RC2 + +除上述环境信息影响性能外,被检控的模块的数量和结构会对性能产生影响,因此本次选取典型网络进行测试,并且选取耗时稳定后的步数进行测试。工具输出键小,对内存无要求。 + +## 模型信息和性能基线 + +以下场景的性能基线测试数据均为多次测试后取平均值,因此实际运行时性能数据可能会根据环境状态稍有浮动。 + +### LLAMA2-13B + +主要数据类型:BFLOAT16 + +模型层数:40 + +配置文件(采了10层): +``` +{ + "targets": { + "language_model.encoder.layers.0": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.1": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.2": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.3": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.4": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.5": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.6": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.7": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.8": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"}, + "language_model.encoder.layers.9": {"input": "tuple[2]:0", "output": "tensor", "input_grad":"tuple[2]:0", "output_grad":"tuple[1]:0"} + }, + "module_ranks": "0" +} +``` + +启动命令参数:python3 -u pretrain_gpt.py --local-rank=1 --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --sequence-parallel --num-layers 40 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --tokenizer-type Llama2Tokenizer --tokenizer-model /new_data/LLM/checkpoint_origin/llama2-13b-hf/tokenizer.model --seq-length 4096 --max-position-embeddings 4096 --micro-batch-size 2 --global-batch-size 16 --make-vocab-size-divisible-by 1 --lr 1e-6 --train-iters 5000 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --swiglu --use-flash-attn --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1e-8 --weight-decay 1e-1 --lr-warmup-fraction 0.01 --clip-grad 1.0 --adam-beta1 0.9 --initial-loss-scale 4096 --adam-beta2 
+
+Per-iteration time without the tool: **4s**
+
+Per-iteration time with the tool, single card: **4.25s**
+
+Per-iteration time with the tool, multiple cards: **4.35s**
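+
+For reference, that corresponds to a per-iteration overhead of roughly 6.25% on a single card (4.25s vs. 4s) and roughly 8.75% on multiple cards (4.35s vs. 4s) for this configuration.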