From 5662844366ca38153407c8e4b8d8a12b3cd3843d Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Fri, 25 Mar 2022 06:45:09 +0000
Subject: [PATCH 14/88] =?UTF-8?q?=E7=AC=AC=E4=B8=80=E6=AC=A1=E6=8F=90?=
=?UTF-8?q?=E4=BA=A4?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt
new file mode 100644
index 000000000..d648fdf46
--- /dev/null
+++ b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:NOK
+PerfStatus:NOK
+PrecisionStatus:NOK
\ No newline at end of file
--
Gitee
From 1a9b03d9a1428cb7869ff7cf9851fb07cf29c47a Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 13:49:06 +0000
Subject: [PATCH 15/88] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow/contrib/cv/SVD=5FID2019=5Ffor=5FTensorflow/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SVD_ID2019_for_Tensorflow/README.md | 183 ------------------
1 file changed, 183 deletions(-)
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
deleted file mode 100644
index 2c4d94f72..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
+++ /dev/null
@@ -1,183 +0,0 @@
-基本信息
-
-**发布者(Publisher):Huawei**
-
-**应用领域(Application Domain):CV**
-
-**版本(Version):**
-
-**修改时间(Modified) :2022.3.25**
-
-**框架(Framework):TensorFlow 1.15.0**
-
-**处理器(Processor):昇腾910**
-
-**描述(Description):使用训练好的SVD模型,评估对称正交化在点云对准中的应用效果。**
-
-概述
-
-给定两个三维点云图,利用SVD正交化过程SVDO+(M)将其投射到SO(3)上,要求网络预测最佳对齐它们的3D旋转。
-
-- 开源代码:
-
- https://github.com/google-research/google-research/tree/master/special_orthogonalization。
-
-- 参考论文:
-
- [An Analysis of SVD for Deep Rotation Estimation](https://arxiv.org/abs/2006.14616)
-
-- 参考实现:
-
- obs://cann-id2019/gpu/
-
-
-- 通过Git获取对应commit\_id的代码方法如下:
-
- ```
- git clone {repository_url} # 克隆仓库的代码
- cd {repository_name} # 切换到模型的代码仓目录
- git checkout {branch} # 切换到对应分支
- git reset --hard {commit_id} # 代码设置到对应的commit_id
- cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
- ```
-
-## 默认配置
-
-- 数据集获取:obs://cann-id2019/dataset/
-
-- 训练超参
-
- - log_step_count=200
- - save_summaries_steps=25000
- - train_steps=2600000
- - save_checkpoints_steps=100000
- - eval_examples=39900
-
-
-
-训练环境准备
-
-1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南]( https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。
-2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
-
- 当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
-
- **表 1** 镜像列表
-
-
- 镜像名称
- |
- 镜像版本
- |
- 配套CANN版本
- |
-
-
-
- |
- 20.2.0
- |
- 20.2
- |
-
-
-
-
-
-
-## 脚本参数
-
-```
---input_test_files1 原始输入测试文件的正则表达式
---input_test_files2 原始输入测试文件的正则表达式
---output_directory1 将存储新测试文件的输出目录
---output_directory2 将存储新测试文件的输出目录
---num_rotations_per_file 每个测试点云的随机旋转增加数。默认为100
---random_rotation_axang 如果为真,则使用该方法从原始基准代码中对随机旋转进行采样,否则样本采用哈尔测量。默认为真
---method 指定用于预测旋转的方式。选项为"svd", "svd-inf", or "gs"。默认为“svd”
---checkpoint_dir 保存检查点位置
---train_steps 训练迭代的次数。默认为2600000
---save_checkpoints_steps 保存检查点的频率。默认为10000
---log_step_count 日志记录一次的步数。默认为500
---save_summaries_steps 保存一次summary的步数。默认为5000
---learning_rate 默认为1e-5
---lr_decay 如果为真,则衰减learning rate。默认为假
---lr_decay_steps learning rate衰减步数。默认为35000
---lr_decay_rate learning rate衰减速率。默认为0.95
---predict_all_test 如果为真,则在最新的检查点上运行eval作业,并打印每个输入的错误。默认为假
---eval_examples 测试样本的数量。默认为0
---print_variable_names 打印模型变量名。默认为假
---num_train_augmentations 增加每个输入点云的随机旋转数。默认为10
---pt_cloud_train_files 匹配所有训练点文件的表达式
---pt_cloud_test_files 匹配所有修改的测试点文件的表达式
-
-```
-
-
-
-## 运行
-
-GPU运行命令如下:
-
-**修改原始测试数据**
-
-注:生成的文件points_test_modified、points0已包含在dataset文件夹中。
-```bash
-# 将路径设置到训练点云图文件
-IN_FILES1=/shapenet/data/pc_plane/points/*.pts
-IN_FILES2=/shapenet/data/pc_plane/points_test/*.pts
-
-# 设置新生成文件的路径
-NEW_TEST_FILES_DIR1=/shapenet/data/pc_plane/points0
-NEW_TEST_FILES_DIR2=/shapenet/data/pc_plane/points_test_modified
-
-# 决定旋转轴角的分布
-AXANG_SAMPLING=True
-
-python -m special_orthogonalization.gen_pt_test_data --input_test_files1=$IN_FILES1 --input_test_files2=$IN_FILES2 --output_directory1=$NEW_TEST_FILES_DIR1 --output_directory2=$NEW_TEST_FILES_DIR2 --random_rotation_axang=$AXANG_SAMPLING
-```
-
-**训练与评价**
-```bash
-# 将路径设置到原始训练数据
-TRAIN_FILES=/shapenet/data/pc_plane/points0/*.pts
-
-#将路径设置到旋转后的训练数据
-TEST_FILES=$NEW_TEST_FILES_DIR2/*.pts
-
-# 指定旋转预测方式
-METHOD=svd
-
-# 指定ckpt、summaries、评价结果等的存储路径
-OUT_DIR=/path/to/model
-
-python -m special_orthogonalization.main_point_cloud --method=$METHOD --checkpoint_dir=$OUT_DIR --log_step_count=200 --save_summaries_steps=25000 --pt_cloud_train_files=$TRAIN_FILES --pt_cloud_test_files=$TEST_FILES --train_steps=2200000 --save_checkpoints_steps=100000 --eval_examples=39900
-```
-
-**从所有训练样本中生成统计数据**
-```bash
-# 打印均值、中位数、标准差和分位数
-python -m special_orthogonalization.main_point_cloud --method=$METHOD --checkpoint_dir=$OUT_DIR --pt_cloud_test_files=$TEST_FILES --predict_all_test=True
-```
-
-## 训练结果
-**精度对比:**
-
-由于弹性云服务器上Tesla V100的GPU训练环境无法支持运行超过100万步,选用矩池云GPU的运行结果进行精度对比。
-
-
-| 测地线误差(°) | 论文发布 | GPU实测 | NPU实测 |
-| ------------------------ | ------- | ----- | ------- |
-| 平均值 | 1.63 | 5.55 | 待测 |
-| 中值 | 0.89 | 3.65 | 待测 |
-| 标准偏差 | 6.70 | 10.68 | 待测 |
-
-**性能对比:**
-
-取弹性云GPU运行的前2600步的global_step/sec平均值和NPU运行的前2600步的global_step/sec平均值进行对比,以达到性能对比的目的。
-
-| 性能指标项 | 论文发布 | GPU实测 | NPU实测 |
-| ------------------- | ------- | ------ | ------ |
-| global_step/sec| 无 | 79.48 | 66.08 |
-
-
--
Gitee
From f353bdabf5627ce2ca8bb9fc8d019e04b7f5202d Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 13:49:32 +0000
Subject: [PATCH 16/88] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E5=90=8E=E7=9A=84readm?=
=?UTF-8?q?e=EF=BC=8C=E5=8A=A0=E5=85=A5=E4=BA=86NPU=E4=B8=8A=E8=BF=90?=
=?UTF-8?q?=E8=A1=8C=E7=9A=84=E6=9C=80=E6=96=B0=E7=BB=93=E6=9E=9C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SVD_ID2019_for_Tensorflow/README.md | 229 ++++++++++++++++++
1 file changed, 229 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
new file mode 100644
index 000000000..a4ccebb34
--- /dev/null
+++ b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
@@ -0,0 +1,229 @@
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):CV**
+
+**版本(Version):**
+
+**修改时间(Modified) :2022.05.05**
+
+**框架(Framework):TensorFlow 1.15.0**
+
+**处理器(Processor):昇腾910**
+
+**描述(Description):使用训练好的SVD模型,评估对称正交化在点云对准中的应用效果。**
+
+概述
+
+给定两个三维点云图,利用SVD正交化过程SVDO+(M)将其投射到SO(3)上,要求网络预测最佳对齐它们的3D旋转。
+
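+下面给出SVDO+(M)正交化过程的一个最小NumPy示意(非本仓库实现,函数名与写法仅为假设,用于说明原理):
+
+```python
+import numpy as np
+
+def svd_orthogonalize(m):
+    """将任意3x3矩阵M投射到SO(3):SVDO+(M) = U diag(1, 1, det(UV^T)) V^T。"""
+    u, _, vt = np.linalg.svd(m)
+    d = np.linalg.det(u @ vt)
+    return u @ np.diag([1.0, 1.0, d]) @ vt
+```
+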
+- 开源代码:
+
+ https://github.com/google-research/google-research/tree/master/special_orthogonalization。
+
+- 参考论文:
+
+ [An Analysis of SVD for Deep Rotation Estimation](https://arxiv.org/abs/2006.14616)
+
+- 参考实现:
+
+ obs://cann-id2019/gpu/
+
+- 相关迁移的工作:
+ 在进行代码迁移到NPU上时,输入的训练数据为点云数据,点云数据的shape为(N,3),其中N并不是固定的,因此在NPU上存在动态shape的问题,导致模型训练无法正常进行。我们为此想了三个解决方法:1、找出所有点云数据中最小的N,对于大于N的点云数据,仅取前N行的数据输入训练。2、找到所有点云数据中最大的N,对于小于N的点云进行补0操作,将所有数据固定为最大的N后,输入网络进行训练。3、找到所有点云数据中最大的N,对小于N的点云数据,从原数据中选择一个点云进行填补至行数为N,再将数据输入网络进行训练。该三种方法均成功解决了NPU上的动态shape问题,但是第一种方法删除了样本点,因此导致最后训练出的模型精度很差;第二种方法虽然并没有丢失样本信息,但是向数据中填入大量的0,改变了本来的代码逻辑,导致最后训练出的模型精度也并不高。对于第三种方法,即没有丢失样本信息,对每个点云数据中的某一个点云样本点进行重复操作,没有改变原始的代码逻辑,最后也获得了不错的精度表现。
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+ ```
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+ ```
+
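+针对上文“相关迁移的工作”中最终采用的方法3(把每个点云通过重复已有样本点补齐到固定行数N),下面给出一个最小示意(假设性代码,非本仓库实现):
+
+```python
+import numpy as np
+
+def pad_point_cloud(pts, target_n):
+    """把形状为(M, 3)的点云补齐为(target_n, 3),不足的行用原点云中已有的点重复填充。"""
+    assert pts.shape[0] <= target_n
+    if pts.shape[0] == target_n:
+        return pts
+    # 简单起见重复第一个点;重复任意已有点都不会引入新的几何信息
+    pad = np.repeat(pts[:1], target_n - pts.shape[0], axis=0)
+    return np.vstack([pts, pad])
+
+# 用法示意:target_n取所有点云中最大的N
+# padded = [pad_point_cloud(np.loadtxt(f), max_n) for f in point_files]
+```
+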
+## 默认配置
+
+- 数据集获取:obs://cann-id2019/dataset/
+
+- 训练超参
+
+ - log_step_count=200
+ - save_summaries_steps=25000
+ - train_steps=2600000
+ - save_checkpoints_steps=100000
+ - eval_examples=39900
+
+
+
+训练环境准备
+
+1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南]( https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。
+2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
+
+ 当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
+
+ **表 1** 镜像列表
+
+
+| 镜像名称 | 镜像版本 | 配套CANN版本 |
+| -------- | -------- | ------------ |
+| [ascend-tensorflow-arm](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm) | 20.2.0 | 20.2 |
+
+
+
+
+
+
+## 脚本参数
+
+```
+gen_pt_test_data_gpu.py 中的参数
+
+--input_test_files 传入需要进行旋转的点云数据集
+--output_directory 存储旋转后点云集的路径
+--random_rotation_axang 布尔型,若为真,则对传入的数据集采用指定方法旋转。默认为真
+--num_rotations_per_file 每个测试点云的随机旋转增加数。默认为100
+
+
+main_point_cloud_gpu.py 中的参数
+
+--pt_cloud_test_files 测试数据集路径
+--pt_cloud_train_files 训练数据集路径
+--method 指定用于预测旋转的方式。选项为"svd", "svd-inf", or "gs"。默认为“svd”
+--checkpoint_dir 训练模型的存放位置
+--train_steps 训练迭代的次数。默认为2600000
+--save_checkpoints_steps 保存检查点的频率。默认为10000
+--log_step_count 日志记录一次的步数。默认为200
+--save_summaries_steps 保存一次summary的步数。默认为5000
+--learning_rate 默认为1e-5
+--lr_decay 如果为真,则衰减learning rate。默认为假
+--lr_decay_steps learning rate衰减步数。默认为35000
+--lr_decay_rate learning rate衰减速率。默认为0.95
+--predict_all_test 如果为真,则在最新的检查点上运行eval作业,并打印每个输入的误差信息。默认为假
+--eval_examples 测试样本的数量。默认为0
+--print_variable_names 打印模型变量名。默认为假
+--num_train_augmentations 增加每个输入点云的随机旋转数。默认为10
+
+```
+
+
+
+## 运行
+
+GPU运行命令如下:
+
+**生成测试数据**
+
+注:生成的文件points_test_modified、points已包含在dataset文件夹中。
+```bash
+# 将路径设置到测试点云图文件
+IN_FILES=/shapenet/data/pc_plane/points_test/*.pts
+
+# 设置新生成文件的路径
+NEW_TEST_FILES_DIR=/shapenet/data/pc_plane/points_test_modified
+
+# 决定旋转轴角的分布
+AXANG_SAMPLING=True
+
+python -m special_orthogonalization.gen_pt_test_data_gpu --input_test_files=$IN_FILES --output_directory=$NEW_TEST_FILES_DIR --random_rotation_axang=$AXANG_SAMPLING
+```
+
+**训练与评价**
+```bash
+# 将路径设置到原始训练数据
+TRAIN_FILES=/shapenet/data/pc_plane/points/*.pts
+
+#将路径设置到旋转后的训练数据
+TEST_FILES=$NEW_TEST_FILES_DIR/*.pts
+
+# 指定旋转预测方式
+METHOD=svd
+
+# 指定ckpt、summaries、评价结果等的存储路径
+OUT_DIR=/path/to/model
+
+python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --log_step_count=200 --save_summaries_steps=25000 --pt_cloud_train_files=$TRAIN_FILES --pt_cloud_test_files=$TEST_FILES --train_steps=2600000 --save_checkpoints_steps=100000 --eval_examples=39900
+```
+
+**从所有训练样本中生成统计数据**
+```bash
+# 打印均值、中位数、标准差和分位数
+python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --pt_cloud_test_files=$TEST_FILES --predict_all_test=True
+```
+## 运行
+
+NPU运行命令方式如下:
+
+对于全部三个步骤来说,ModelArts插件中OBS桶的数据路径均要写到真正包含数据的那一级目录。
+例如,pc_plane文件夹中含有points、points_test等存放数据的子文件夹,
+则ModelArts插件中的数据路径应写为 obs://cann-id2019/data/pc_plane/
+
+**生成测试数据**
+
+运行这一步我们需要的程序文件为gen_pt_test_data.py、modelarts_entry_Axang.py、genTestData.sh
+这三个文件中的代码均不需要修改
+最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
+为进行第二步模型训练,需要将生成旋转后的文件转移至obs桶中存放data的路径
+
+注:需要确保的是存在obs桶里的data文件名为points_test
+
+**训练与评价**
+
+运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_acc_train.py、train_full_1p.sh
+这三个文件中的代码均不需要修改
+最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
+为进行第三步,需要将生成的output文件转移至obs桶中存放data的路径
+
+注意:该次训练的模型保存在该次的obs文件夹中,进行第三步时又需要重启一次新的modelarts,因此我们需要将output文件中的
+checkpoint文件中最新模型的路径修改
+"/home/ma-user/modelarts/inputs/data_url_0/output"
+这样第三步才能跑出正确的精度指标
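+
+例如,修改后的checkpoint文件内容大致如下(其中检查点步数2600000仅为示意,应以实际保存的最新模型文件为准):
+
+```
+model_checkpoint_path: "/home/ma-user/modelarts/inputs/data_url_0/output/model.ckpt-2600000"
+all_model_checkpoint_paths: "/home/ma-user/modelarts/inputs/data_url_0/output/model.ckpt-2600000"
+```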
+
+**从所有训练样本中生成统计数据**
+
+运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_stat.py、genStatistical.sh
+这三个文件的代码均不需要修改
+运行成功后将会在屏幕上打印出关于精度相应的统计量值
+
+注意:由于加入了混合精度提高训练性能,导致该步出现未知错误,输出的误差均为0,因此建议将训练后的结果下载到本地
+修改checkpoint文件中模型的路径后,利用本地的GPU进行运行,实际上该步骤在运行时NPU成功启动但也未曾利用。
+
+## 训练结果
+**精度对比:**
+
+
+
+| 测地线误差(°) | 论文发布 | GPU(初始代码未改动版本) | GPU实测|NPU实测 |
+| ------------------------ | ------- | ----- | --------- |----------|
+| 平均值 | 1.63 | 2.58 | 3.98 | 2.92 |
+| 中值 | 0.89 | 1.68 | 2.6 | 1.7 |
+| 标准差 | 6.70 | 6.93 | 9.36 | 8.45 |
+
+相比于论文中的精度,NPU迁移后的实测结果差距依然较大,但与未改动任何代码的初始版本在GPU上实测的精度相差较小;
+且在代码改动相同的前提下,NPU上的精度优于GPU上的精度。需要注意的是,在NPU上运行时我们采用了混合精度来提升训练
+性能,但其中产生了未知错误,导致精度指标mean_degree_err在整个训练过程中始终为0,因此我们无法得知NPU训练
+260万步过程中精度指标的下降情况。值得庆幸的是,用NPU训练出的模型能够在GPU上计算出精度,并且精度表现不错。
+
+**性能对比:**
+
+取弹性云GPU运行的前2600步的global_step/sec平均值和NPU运行的前2600步的global_step/sec平均值进行对比,以达到性能对比的目的。
+
+| 性能指标项 | 论文发布 | GPU实测 | NPU实测 |
+| ------------------- | ------- | ------ | ------ |
+| global_step/sec| 无 | 87.64 | 101.24 |
+
+
--
Gitee
From 909bf473bee586a14649f7634c8796108836d851 Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 13:55:26 +0000
Subject: [PATCH 17/88] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow/contrib/cv/SVD=5FID2019=5Ffor=5FTensorflow/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SVD_ID2019_for_Tensorflow/README.md | 229 ------------------
1 file changed, 229 deletions(-)
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
deleted file mode 100644
index a4ccebb34..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
+++ /dev/null
@@ -1,229 +0,0 @@
-基本信息
-
-**发布者(Publisher):Huawei**
-
-**应用领域(Application Domain):CV**
-
-**版本(Version):**
-
-**修改时间(Modified) :2022.05.05**
-
-**框架(Framework):TensorFlow 1.15.0**
-
-**处理器(Processor):昇腾910**
-
-**描述(Description):使用训练好的SVD模型,评估对称正交化在点云对准中的应用效果。**
-
-概述
-
-给定两个三维点云图,利用SVD正交化过程SVDO+(M)将其投射到SO(3)上,要求网络预测最佳对齐它们的3D旋转。
-
-- 开源代码:
-
- https://github.com/google-research/google-research/tree/master/special_orthogonalization。
-
-- 参考论文:
-
- [An Analysis of SVD for Deep Rotation Estimation](https://arxiv.org/abs/2006.14616)
-
-- 参考实现:
-
- obs://cann-id2019/gpu/
-
-- 相关迁移的工作:
- 在进行代码迁移到NPU上时,输入的训练数据为点云数据,点云数据的shape为(N,3),其中N并不是固定的,因此在NPU上存在动态shape的问题,导致模型训练无法正常进行。我们为此想了三个解决方法:1、找出所有点云数据中最小的N,对于大于N的点云数据,仅取前N行的数据输入训练。2、找到所有点云数据中最大的N,对于小于N的点云进行补0操作,将所有数据固定为最大的N后,输入网络进行训练。3、找到所有点云数据中最大的N,对小于N的点云数据,从原数据中选择一个点云进行填补至行数为N,再将数据输入网络进行训练。该三种方法均成功解决了NPU上的动态shape问题,但是第一种方法删除了样本点,因此导致最后训练出的模型精度很差;第二种方法虽然并没有丢失样本信息,但是向数据中填入大量的0,改变了本来的代码逻辑,导致最后训练出的模型精度也并不高。对于第三种方法,即没有丢失样本信息,对每个点云数据中的某一个点云样本点进行重复操作,没有改变原始的代码逻辑,最后也获得了不错的精度表现。
-
-- 通过Git获取对应commit\_id的代码方法如下:
-
- ```
- git clone {repository_url} # 克隆仓库的代码
- cd {repository_name} # 切换到模型的代码仓目录
- git checkout {branch} # 切换到对应分支
- git reset --hard {commit_id} # 代码设置到对应的commit_id
- cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
- ```
-
-## 默认配置
-
-- 数据集获取:obs://cann-id2019/dataset/
-
-- 训练超参
-
- - log_step_count=200
- - save_summaries_steps=25000
- - train_steps=2600000
- - save_checkpoints_steps=100000
- - eval_examples=39900
-
-
-
-训练环境准备
-
-1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南]( https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。
-2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
-
- 当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
-
- **表 1** 镜像列表
-
-
- 镜像名称
- |
- 镜像版本
- |
- 配套CANN版本
- |
-
-
-
- |
- 20.2.0
- |
- 20.2
- |
-
-
-
-
-
-
-## 脚本参数
-
-```
-gen_pt_test_data_gpu.py 中的参数
-
---input_test_files 传入需要进行旋转的点云数据集
---output_directory 存储旋转后点云集的路径
---random_rotation_axang boole型,若为真将会对传入的数据集采用指定方法旋转,默认为真
---num_rotations_per_file 每个测试点云的随机旋转增加数。默认为100
-
-
-main_point_cloud_gpu.py 中的参数
-
---pt_cloud_test_files 测试数据集路径
---pt_cloud_train_files 熟练数据集路径
---method 指定用于预测旋转的方式。选项为"svd", "svd-inf", or "gs"。默认为“svd”
---checkpoint_dir 训练模型的存放位置
---train_steps 训练迭代的次数。默认为2600000
---save_checkpoints_steps 保存检查点的频率。默认为10000
---log_step_count 日志记录一次的步数。默认为200
---save_summaries_steps 保存一次summary的步数。默认为5000
---learning_rate 默认为1e-5
---lr_decay 如果为真,则衰减learning rate。默认为假
---lr_decay_steps learning rate衰减步数。默认为35000
---lr_decay_rate learning rate衰减速率。默认为0.95
---predict_all_test 如果为真,则在最新的检查点上运行eval作业,并打印每个输入的误差信息。默认为假
---eval_examples 测试样本的数量。默认为0
---print_variable_names 打印模型变量名。默认为假
---num_train_augmentations 增加每个输入点云的随机旋转数。默认为10
-
-```
-
-
-
-## 运行
-
-GPU运行命令如下:
-
-**生成测试数据**
-
-注:生成的文件points_test_modified、points已包含在dataset文件夹中。
-```bash
-# 将路径设置到训练点云图文件
-IN_FILES=/shapenet/data/pc_plane/points_test/*.pts
-
-NEW_TEST_FILES_DIR=/shapenet/data/pc_plane/points_test_modified
-
-AXANG_SAMPLING=True
-
-# 决定旋转轴角的分布
-AXANG_SAMPLING=True
-
-python -m special_orthogonalization.gen_pt_test_data_gpu --input_test_files=$IN_FILES --output_directory=$NEW_TEST_FILES_DIR --random_rotation_axang=$AXANG_SAMPLING
-```
-
-**训练与评价**
-```bash
-# 将路径设置到原始训练数据
-TRAIN_FILES=/shapenet/data/pc_plane/points/*.pts
-
-#将路径设置到旋转后的训练数据
-TEST_FILES=$NEW_TEST_FILES_DIR/*.pts
-
-# 指定旋转预测方式
-METHOD=svd
-
-# 指定ckpt、summaries、评价结果等的存储路径
-OUT_DIR=/path/to/model
-
-python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --log_step_count=200 --save_summaries_steps=25000 --pt_cloud_train_files=$TRAIN_FILES --pt_cloud_test_files=$TEST_FILES --train_steps=2600000 --save_checkpoints_steps=100000 --eval_examples=39900
-```
-
-**从所有训练样本中生成统计数据**
-```bash
-# 打印均值、中位数、标准差和分位数
-python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --pt_cloud_test_files=$TEST_FILES --predict_all_test=True
-```
-## 运行
-
-NPU运行命令方式如下:
-
-对于所有的三个步骤程序来说,modelarts插件obs桶中的数据路径均要写到真正包含数据的那一个路径
-如在pc_plane文件夹中含有points、points_test等包含数据的文件夹
-modelarts插件中的数据路径写为 obs://cann-id2019/data/pc_palne/
-
-**生成测试数据**
-
-运行这一步我们需要的程序文件为gen_pt_test_data.py、modelarts_entry_Axang.py、genTestData.sh
-这三个文件中的代码均不需要修改
-最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
-为进行第二步模型训练,需要将生成旋转后的文件转移至obs桶中存放data的路径
-
-注:需要确保的是存在obs桶里的data文件名为points_test
-
-**训练与评价**
-
-运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_acc_train.py、train_full_1p.sh
-这三个文件中的代码均不需要修改
-最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
-为进行第三步,需要将生成的output文件转移至obs桶中存放data的路径
-
-注意:该次训练的模型保存在该次的obs文件夹中,进行第三步时又需要重启一次新的modelarts,因此我们需要将output文件中的
-checkpoint文件中最新模型的路径修改
-"/home/ma-user/modelarts/inputs/data_url_0/output"
-这样第三步才能跑出正确的精度指标
-
-**从所有训练样本中生成统计数据**
-
-运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_stat.py、genStatistical.sh
-这三个文件的代码均不需要修改
-运行成功后将会在屏幕上打印出关于精度相应的统计量值
-
-注意:由于加入了混合精度提高训练性能,导致该步出现未知错误,输出的误差均为0,因此建议将训练后的结果下载到本地
-修改checkpoint文件中模型的路径后,利用本地的GPU进行运行,实际上该步骤在运行时NPU成功启动但也未曾利用。
-
-## 训练结果
-**精度对比:**
-
-
-
-| 测地线误差(°) | 论文发布 | GPU(初始代码未改动版本) | GPU实测|NPU实测 |
-| ------------------------ | ------- | ----- | --------- |----------|
-| 平均值 | 1.63 | 2.58 | 3.98 | 2.92 |
-| 中值 | 0.89 | 1.68 | 2.6 | 1.7 |
-| 标准差 | 6.70 | 6.93 | 9.36 | 8.45 |
-
-相比于论文中的精度,我们NPU迁移后实测差距依然较大,但是与我们未对代码任何改动初始的版本在GPU上跑出来的精度相差较小,
-且对于相同的代码的代码改动,NPU上的精度优于GPU上的精度。需要注意的是,在NPU上运行程序时,我们采用混合精度来提升训练
-的性能,但是其中产生了未知的错误,导致代表的精度指标mean_degree_err在整个训练过程中始终为0,因此我们无法得知在NPU训练的
-260w步的过程中,精度指标是下降的过程是怎样的。值得庆幸的是通过NPU训练出的模型,能够在GPU上计算出精度,并且精度还不错。
-
-**性能对比:**
-
-取弹性云GPU运行的前2600步的global_step/sec平均值和NPU运行的前2600步的global_step/sec平均值进行对比,以达到性能对比的目的。
-
-| 性能指标项 | 论文发布 | GPU实测 | NPU实测 |
-| ------------------- | ------- | ------ | ------ |
-| global_step/sec| 无 | 87.64 | 101.24 |
-
-
--
Gitee
From 6f1937bb19df074af7fbc1ca8087d2c290080142 Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 13:55:50 +0000
Subject: [PATCH 18/88] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E5=90=8E=E7=9A=84readm?=
=?UTF-8?q?e=EF=BC=8C=E5=8A=A0=E5=85=A5=E4=BA=86NPU=E4=B8=8A=E8=BF=90?=
=?UTF-8?q?=E8=A1=8C=E7=9A=84=E6=9C=80=E6=96=B0=E7=BB=93=E6=9E=9C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SVD_ID2019_for_Tensorflow/README.md | 229 ++++++++++++++++++
1 file changed, 229 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
new file mode 100644
index 000000000..e75cc15c5
--- /dev/null
+++ b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
@@ -0,0 +1,229 @@
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):CV**
+
+**版本(Version):**
+
+**修改时间(Modified) :2022.05.05**
+
+**框架(Framework):TensorFlow 1.15.0**
+
+**处理器(Processor):昇腾910**
+
+**描述(Description):使用训练好的SVD模型,评估对称正交化在点云对准中的应用效果。**
+
+概述
+
+给定两个三维点云图,利用SVD正交化过程SVDO+(M)将其投射到SO(3)上,要求网络预测最佳对齐它们的3D旋转。代码的训练逻辑是每训练10w步保存一个模型,并且在测试集上检验该模型的精度,最后比较的都是260w步的模型精度
+
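+下面给出SVDO+(M)正交化过程的一个最小NumPy示意(非本仓库实现,函数名与写法仅为假设,用于说明原理):
+
+```python
+import numpy as np
+
+def svd_orthogonalize(m):
+    """将任意3x3矩阵M投射到SO(3):SVDO+(M) = U diag(1, 1, det(UV^T)) V^T。"""
+    u, _, vt = np.linalg.svd(m)
+    d = np.linalg.det(u @ vt)
+    return u @ np.diag([1.0, 1.0, d]) @ vt
+```
+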
+- 开源代码:
+
+ https://github.com/google-research/google-research/tree/master/special_orthogonalization。
+
+- 参考论文:
+
+ [An Analysis of SVD for Deep Rotation Estimation](https://arxiv.org/abs/2006.14616)
+
+- 参考实现:
+
+ obs://cann-id2019/gpu/
+
+- 相关迁移的工作:
+ 在进行代码迁移到NPU上时,输入的训练数据为点云数据,点云数据的shape为(N,3),其中N并不是固定的,因此在NPU上存在动态shape的问题,导致模型训练无法正常进行。我们为此想了三个解决方法:1、找出所有点云数据中最小的N,对于大于N的点云数据,仅取前N行的数据输入训练。2、找到所有点云数据中最大的N,对于小于N的点云进行补0操作,将所有数据固定为最大的N后,输入网络进行训练。3、找到所有点云数据中最大的N,对小于N的点云数据,从原数据中选择一个点云进行填补至行数为N,再将数据输入网络进行训练。该三种方法均成功解决了NPU上的动态shape问题,但是第一种方法删除了样本点,因此导致最后训练出的模型精度很差;第二种方法虽然并没有丢失样本信息,但是向数据中填入大量的0,改变了本来的代码逻辑,导致最后训练出的模型精度也并不高。对于第三种方法,即没有丢失样本信息,对每个点云数据中的某一个点云样本点进行重复操作,没有改变原始的代码逻辑,最后也获得了不错的精度表现。
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+ ```
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+ ```
+
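+针对上文“相关迁移的工作”中最终采用的方法3(把每个点云通过重复已有样本点补齐到固定行数N),下面给出一个最小示意(假设性代码,非本仓库实现):
+
+```python
+import numpy as np
+
+def pad_point_cloud(pts, target_n):
+    """把形状为(M, 3)的点云补齐为(target_n, 3),不足的行用原点云中已有的点重复填充。"""
+    assert pts.shape[0] <= target_n
+    if pts.shape[0] == target_n:
+        return pts
+    # 简单起见重复第一个点;重复任意已有点都不会引入新的几何信息
+    pad = np.repeat(pts[:1], target_n - pts.shape[0], axis=0)
+    return np.vstack([pts, pad])
+
+# 用法示意:target_n取所有点云中最大的N
+# padded = [pad_point_cloud(np.loadtxt(f), max_n) for f in point_files]
+```
+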
+## 默认配置
+
+- 数据集获取:obs://cann-id2019/dataset/
+
+- 训练超参
+
+ - log_step_count=200
+ - save_summaries_steps=25000
+ - train_steps=2600000
+ - save_checkpoints_steps=100000
+ - eval_examples=39900
+
+
+
+训练环境准备
+
+1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南]( https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。
+2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
+
+ 当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
+
+ **表 1** 镜像列表
+
+
+| 镜像名称 | 镜像版本 | 配套CANN版本 |
+| -------- | -------- | ------------ |
+| [ascend-tensorflow-arm](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm) | 20.2.0 | 20.2 |
+
+
+
+
+
+
+## 脚本参数
+
+```
+gen_pt_test_data_gpu.py 中的参数
+
+--input_test_files 传入需要进行旋转的点云数据集
+--output_directory 存储旋转后点云集的路径
+--random_rotation_axang 布尔型,若为真,则对传入的数据集采用指定方法旋转。默认为真
+--num_rotations_per_file 每个测试点云的随机旋转增加数。默认为100
+
+
+main_point_cloud_gpu.py 中的参数
+
+--pt_cloud_test_files 测试数据集路径
+--pt_cloud_train_files 训练数据集路径
+--method 指定用于预测旋转的方式。选项为"svd", "svd-inf", or "gs"。默认为“svd”
+--checkpoint_dir 训练模型的存放位置
+--train_steps 训练迭代的次数。默认为2600000
+--save_checkpoints_steps 保存检查点的频率。默认为10000
+--log_step_count 日志记录一次的步数。默认为200
+--save_summaries_steps 保存一次summary的步数。默认为5000
+--learning_rate 默认为1e-5
+--lr_decay 如果为真,则衰减learning rate。默认为假
+--lr_decay_steps learning rate衰减步数。默认为35000
+--lr_decay_rate learning rate衰减速率。默认为0.95
+--predict_all_test 如果为真,则在最新的检查点上运行eval作业,并打印每个输入的误差信息。默认为假
+--eval_examples 测试样本的数量。默认为0
+--print_variable_names 打印模型变量名。默认为假
+--num_train_augmentations 增加每个输入点云的随机旋转数。默认为10
+
+```
+
+
+
+## 运行
+
+GPU运行命令如下:
+
+**生成测试数据**
+
+注:生成的文件points_test_modified、points已包含在dataset文件夹中。
+```bash
+# 将路径设置到测试点云图文件
+IN_FILES=/shapenet/data/pc_plane/points_test/*.pts
+
+# 设置新生成文件的路径
+NEW_TEST_FILES_DIR=/shapenet/data/pc_plane/points_test_modified
+
+# 决定旋转轴角的分布
+AXANG_SAMPLING=True
+
+python -m special_orthogonalization.gen_pt_test_data_gpu --input_test_files=$IN_FILES --output_directory=$NEW_TEST_FILES_DIR --random_rotation_axang=$AXANG_SAMPLING
+```
+
+**训练与评价**
+```bash
+# 将路径设置到原始训练数据
+TRAIN_FILES=/shapenet/data/pc_plane/points/*.pts
+
+#将路径设置到旋转后的训练数据
+TEST_FILES=$NEW_TEST_FILES_DIR/*.pts
+
+# 指定旋转预测方式
+METHOD=svd
+
+# 指定ckpt、summaries、评价结果等的存储路径
+OUT_DIR=/path/to/model
+
+python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --log_step_count=200 --save_summaries_steps=25000 --pt_cloud_train_files=$TRAIN_FILES --pt_cloud_test_files=$TEST_FILES --train_steps=2600000 --save_checkpoints_steps=100000 --eval_examples=39900
+```
+
+**从所有训练样本中生成统计数据**
+```bash
+# 打印均值、中位数、标准差和分位数
+python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --pt_cloud_test_files=$TEST_FILES --predict_all_test=True
+```
+## 运行
+
+NPU运行命令方式如下:
+
+对于全部三个步骤来说,ModelArts插件中OBS桶的数据路径均要写到真正包含数据的那一级目录。
+例如,pc_plane文件夹中含有points、points_test等存放数据的子文件夹,
+则ModelArts插件中的数据路径应写为 obs://cann-id2019/data/pc_plane/
+
+**生成测试数据**
+
+运行这一步我们需要的程序文件为gen_pt_test_data.py、modelarts_entry_Axang.py、genTestData.sh
+这三个文件中的代码均不需要修改
+最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
+为进行第二步模型训练,需要将生成旋转后的文件转移至obs桶中存放data的路径
+
+注:需要确保的是存在obs桶里的data文件名为points_test
+
+**训练与评价**
+
+运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_acc_train.py、train_full_1p.sh
+这三个文件中的代码均不需要修改
+最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
+为进行第三步,需要将生成的output文件转移至obs桶中存放data的路径
+
+注意:该次训练的模型保存在该次的obs文件夹中,进行第三步时又需要重启一次新的modelarts,因此我们需要将output文件中的
+checkpoint文件中最新模型的路径修改
+"/home/ma-user/modelarts/inputs/data_url_0/output"
+这样第三步才能跑出正确的精度指标
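+
+例如,修改后的checkpoint文件内容大致如下(其中检查点步数2600000仅为示意,应以实际保存的最新模型文件为准):
+
+```
+model_checkpoint_path: "/home/ma-user/modelarts/inputs/data_url_0/output/model.ckpt-2600000"
+all_model_checkpoint_paths: "/home/ma-user/modelarts/inputs/data_url_0/output/model.ckpt-2600000"
+```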
+
+**从所有训练样本中生成统计数据**
+
+运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_stat.py、genStatistical.sh
+这三个文件的代码均不需要修改
+运行成功后将会在屏幕上打印出关于精度相应的统计量值
+
+注意:由于加入了混合精度提高训练性能,导致该步出现未知错误,输出的误差均为0,因此建议将训练后的结果下载到本地
+修改checkpoint文件中模型的路径后,利用本地的GPU进行运行,实际上该步骤在运行时NPU成功启动但也未曾利用。
+
+## 训练结果
+**精度对比:**
+
+
+
+| 测地线误差(°) | 论文发布 | GPU(初始代码未改动版本) | GPU实测|NPU实测 |
+| ------------------------ | ------- | ----- | --------- |----------|
+| 平均值 | 1.63 | 2.58 | 3.98 | 2.92 |
+| 中值 | 0.89 | 1.68 | 2.6 | 1.7 |
+| 标准差 | 6.70 | 6.93 | 9.36 | 8.45 |
+
+相比于论文中的精度,NPU迁移后的实测结果差距依然较大,但与未改动任何代码的初始版本在GPU上实测的精度相差较小;
+且在代码改动相同的前提下,NPU上的精度优于GPU上的精度。需要注意的是,在NPU上运行时我们采用了混合精度来提升训练
+性能,但其中产生了未知错误,导致精度指标mean_degree_err在整个训练过程中始终为0,因此我们无法得知NPU训练
+260万步过程中精度指标的下降情况。值得庆幸的是,用NPU训练出的模型能够在GPU上计算出精度,并且精度表现不错。
+
+**性能对比:**
+
+取弹性云GPU运行的前2600步的global_step/sec平均值和NPU运行的前2600步的global_step/sec平均值进行对比,以达到性能对比的目的。
+
+| 性能指标项 | 论文发布 | GPU实测 | NPU实测 |
+| ------------------- | ------- | ------ | ------ |
+| global_step/sec| 无 | 87.64 | 101.24 |
+
+
--
Gitee
From 4d6502db3806624a34ca18ac2700c54c8eb2deee Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 13:56:49 +0000
Subject: [PATCH 19/88] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow/contrib/cv/SVD=5FID2019=5Ffor=5FTensorflow/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SVD_ID2019_for_Tensorflow/README.md | 229 ------------------
1 file changed, 229 deletions(-)
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
deleted file mode 100644
index e75cc15c5..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
+++ /dev/null
@@ -1,229 +0,0 @@
-基本信息
-
-**发布者(Publisher):Huawei**
-
-**应用领域(Application Domain):CV**
-
-**版本(Version):**
-
-**修改时间(Modified) :2022.05.05**
-
-**框架(Framework):TensorFlow 1.15.0**
-
-**处理器(Processor):昇腾910**
-
-**描述(Description):使用训练好的SVD模型,评估对称正交化在点云对准中的应用效果。**
-
-概述
-
-给定两个三维点云图,利用SVD正交化过程SVDO+(M)将其投射到SO(3)上,要求网络预测最佳对齐它们的3D旋转。代码的训练逻辑是每训练10w步保存一个模型,并且在测试集上检验该模型的精度,最后比较的都是260w步的模型精度
-
-- 开源代码:
-
- https://github.com/google-research/google-research/tree/master/special_orthogonalization。
-
-- 参考论文:
-
- [An Analysis of SVD for Deep Rotation Estimation](https://arxiv.org/abs/2006.14616)
-
-- 参考实现:
-
- obs://cann-id2019/gpu/
-
-- 相关迁移的工作:
- 在进行代码迁移到NPU上时,输入的训练数据为点云数据,点云数据的shape为(N,3),其中N并不是固定的,因此在NPU上存在动态shape的问题,导致模型训练无法正常进行。我们为此想了三个解决方法:1、找出所有点云数据中最小的N,对于大于N的点云数据,仅取前N行的数据输入训练。2、找到所有点云数据中最大的N,对于小于N的点云进行补0操作,将所有数据固定为最大的N后,输入网络进行训练。3、找到所有点云数据中最大的N,对小于N的点云数据,从原数据中选择一个点云进行填补至行数为N,再将数据输入网络进行训练。该三种方法均成功解决了NPU上的动态shape问题,但是第一种方法删除了样本点,因此导致最后训练出的模型精度很差;第二种方法虽然并没有丢失样本信息,但是向数据中填入大量的0,改变了本来的代码逻辑,导致最后训练出的模型精度也并不高。对于第三种方法,即没有丢失样本信息,对每个点云数据中的某一个点云样本点进行重复操作,没有改变原始的代码逻辑,最后也获得了不错的精度表现。
-
-- 通过Git获取对应commit\_id的代码方法如下:
-
- ```
- git clone {repository_url} # 克隆仓库的代码
- cd {repository_name} # 切换到模型的代码仓目录
- git checkout {branch} # 切换到对应分支
- git reset --hard {commit_id} # 代码设置到对应的commit_id
- cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
- ```
-
-## 默认配置
-
-- 数据集获取:obs://cann-id2019/dataset/
-
-- 训练超参
-
- - log_step_count=200
- - save_summaries_steps=25000
- - train_steps=2600000
- - save_checkpoints_steps=100000
- - eval_examples=39900
-
-
-
-训练环境准备
-
-1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南]( https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。
-2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
-
- 当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
-
- **表 1** 镜像列表
-
-
- 镜像名称
- |
- 镜像版本
- |
- 配套CANN版本
- |
-
-
-
- |
- 20.2.0
- |
- 20.2
- |
-
-
-
-
-
-
-## 脚本参数
-
-```
-gen_pt_test_data_gpu.py 中的参数
-
---input_test_files 传入需要进行旋转的点云数据集
---output_directory 存储旋转后点云集的路径
---random_rotation_axang boole型,若为真将会对传入的数据集采用指定方法旋转,默认为真
---num_rotations_per_file 每个测试点云的随机旋转增加数。默认为100
-
-
-main_point_cloud_gpu.py 中的参数
-
---pt_cloud_test_files 测试数据集路径
---pt_cloud_train_files 熟练数据集路径
---method 指定用于预测旋转的方式。选项为"svd", "svd-inf", or "gs"。默认为“svd”
---checkpoint_dir 训练模型的存放位置
---train_steps 训练迭代的次数。默认为2600000
---save_checkpoints_steps 保存检查点的频率。默认为10000
---log_step_count 日志记录一次的步数。默认为200
---save_summaries_steps 保存一次summary的步数。默认为5000
---learning_rate 默认为1e-5
---lr_decay 如果为真,则衰减learning rate。默认为假
---lr_decay_steps learning rate衰减步数。默认为35000
---lr_decay_rate learning rate衰减速率。默认为0.95
---predict_all_test 如果为真,则在最新的检查点上运行eval作业,并打印每个输入的误差信息。默认为假
---eval_examples 测试样本的数量。默认为0
---print_variable_names 打印模型变量名。默认为假
---num_train_augmentations 增加每个输入点云的随机旋转数。默认为10
-
-```
-
-
-
-## 运行
-
-GPU运行命令如下:
-
-**生成测试数据**
-
-注:生成的文件points_test_modified、points已包含在dataset文件夹中。
-```bash
-# 将路径设置到训练点云图文件
-IN_FILES=/shapenet/data/pc_plane/points_test/*.pts
-
-NEW_TEST_FILES_DIR=/shapenet/data/pc_plane/points_test_modified
-
-AXANG_SAMPLING=True
-
-# 决定旋转轴角的分布
-AXANG_SAMPLING=True
-
-python -m special_orthogonalization.gen_pt_test_data_gpu --input_test_files=$IN_FILES --output_directory=$NEW_TEST_FILES_DIR --random_rotation_axang=$AXANG_SAMPLING
-```
-
-**训练与评价**
-```bash
-# 将路径设置到原始训练数据
-TRAIN_FILES=/shapenet/data/pc_plane/points/*.pts
-
-#将路径设置到旋转后的训练数据
-TEST_FILES=$NEW_TEST_FILES_DIR/*.pts
-
-# 指定旋转预测方式
-METHOD=svd
-
-# 指定ckpt、summaries、评价结果等的存储路径
-OUT_DIR=/path/to/model
-
-python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --log_step_count=200 --save_summaries_steps=25000 --pt_cloud_train_files=$TRAIN_FILES --pt_cloud_test_files=$TEST_FILES --train_steps=2600000 --save_checkpoints_steps=100000 --eval_examples=39900
-```
-
-**从所有训练样本中生成统计数据**
-```bash
-# 打印均值、中位数、标准差和分位数
-python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --pt_cloud_test_files=$TEST_FILES --predict_all_test=True
-```
-## 运行
-
-NPU运行命令方式如下:
-
-对于所有的三个步骤程序来说,modelarts插件obs桶中的数据路径均要写到真正包含数据的那一个路径
-如在pc_plane文件夹中含有points、points_test等包含数据的文件夹
-modelarts插件中的数据路径写为 obs://cann-id2019/data/pc_palne/
-
-**生成测试数据**
-
-运行这一步我们需要的程序文件为gen_pt_test_data.py、modelarts_entry_Axang.py、genTestData.sh
-这三个文件中的代码均不需要修改
-最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
-为进行第二步模型训练,需要将生成旋转后的文件转移至obs桶中存放data的路径
-
-注:需要确保的是存在obs桶里的data文件名为points_test
-
-**训练与评价**
-
-运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_acc_train.py、train_full_1p.sh
-这三个文件中的代码均不需要修改
-最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
-为进行第三步,需要将生成的output文件转移至obs桶中存放data的路径
-
-注意:该次训练的模型保存在该次的obs文件夹中,进行第三步时又需要重启一次新的modelarts,因此我们需要将output文件中的
-checkpoint文件中最新模型的路径修改
-"/home/ma-user/modelarts/inputs/data_url_0/output"
-这样第三步才能跑出正确的精度指标
-
-**从所有训练样本中生成统计数据**
-
-运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_stat.py、genStatistical.sh
-这三个文件的代码均不需要修改
-运行成功后将会在屏幕上打印出关于精度相应的统计量值
-
-注意:由于加入了混合精度提高训练性能,导致该步出现未知错误,输出的误差均为0,因此建议将训练后的结果下载到本地
-修改checkpoint文件中模型的路径后,利用本地的GPU进行运行,实际上该步骤在运行时NPU成功启动但也未曾利用。
-
-## 训练结果
-**精度对比:**
-
-
-
-| 测地线误差(°) | 论文发布 | GPU(初始代码未改动版本) | GPU实测|NPU实测 |
-| ------------------------ | ------- | ----- | --------- |----------|
-| 平均值 | 1.63 | 2.58 | 3.98 | 2.92 |
-| 中值 | 0.89 | 1.68 | 2.6 | 1.7 |
-| 标准差 | 6.70 | 6.93 | 9.36 | 8.45 |
-
-相比于论文中的精度,我们NPU迁移后实测差距依然较大,但是与我们未对代码任何改动初始的版本在GPU上跑出来的精度相差较小,
-且对于相同的代码的代码改动,NPU上的精度优于GPU上的精度。需要注意的是,在NPU上运行程序时,我们采用混合精度来提升训练
-的性能,但是其中产生了未知的错误,导致代表的精度指标mean_degree_err在整个训练过程中始终为0,因此我们无法得知在NPU训练的
-260w步的过程中,精度指标是下降的过程是怎样的。值得庆幸的是通过NPU训练出的模型,能够在GPU上计算出精度,并且精度还不错。
-
-**性能对比:**
-
-取弹性云GPU运行的前2600步的global_step/sec平均值和NPU运行的前2600步的global_step/sec平均值进行对比,以达到性能对比的目的。
-
-| 性能指标项 | 论文发布 | GPU实测 | NPU实测 |
-| ------------------- | ------- | ------ | ------ |
-| global_step/sec| 无 | 87.64 | 101.24 |
-
-
--
Gitee
From 33b4df92491e137319c41788355526a0ed74e796 Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 13:57:05 +0000
Subject: [PATCH 20/88] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E5=90=8E=E7=9A=84readm?=
=?UTF-8?q?e=EF=BC=8C=E5=8A=A0=E5=85=A5=E4=BA=86NPU=E4=B8=8A=E8=BF=90?=
=?UTF-8?q?=E8=A1=8C=E7=9A=84=E6=9C=80=E6=96=B0=E7=BB=93=E6=9E=9C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SVD_ID2019_for_Tensorflow/README.md | 229 ++++++++++++++++++
1 file changed, 229 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
new file mode 100644
index 000000000..5724c3409
--- /dev/null
+++ b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/README.md
@@ -0,0 +1,229 @@
+基本信息
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):CV**
+
+**版本(Version):**
+
+**修改时间(Modified) :2022.05.05**
+
+**框架(Framework):TensorFlow 1.15.0**
+
+**处理器(Processor):昇腾910**
+
+**描述(Description):使用训练好的SVD模型,评估对称正交化在点云对准中的应用效果。**
+
+概述
+
+给定两个三维点云图,利用SVD正交化过程SVDO+(M)将其投射到SO(3)上,要求网络预测最佳对齐它们的3D旋转。代码的训练逻辑是每训练10w步保存一个模型,并且在测试集上检验该模型的精度,最后比较的都是260w步的模型精度
+
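+下面给出SVDO+(M)正交化过程的一个最小NumPy示意(非本仓库实现,函数名与写法仅为假设,用于说明原理):
+
+```python
+import numpy as np
+
+def svd_orthogonalize(m):
+    """将任意3x3矩阵M投射到SO(3):SVDO+(M) = U diag(1, 1, det(UV^T)) V^T。"""
+    u, _, vt = np.linalg.svd(m)
+    d = np.linalg.det(u @ vt)
+    return u @ np.diag([1.0, 1.0, d]) @ vt
+```
+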
+- 开源代码:
+
+ https://github.com/google-research/google-research/tree/master/special_orthogonalization。
+
+- 参考论文:
+
+ [An Analysis of SVD for Deep Rotation Estimation](https://arxiv.org/abs/2006.14616)
+
+- 参考实现:
+
+ obs://cann-id2019/gpu/
+
+- 相关迁移的工作:
+ 在进行代码迁移到NPU上时,输入的训练数据为点云数据,点云数据的shape为(N,3),其中N并不是固定的,因此在NPU上存在动态shape的问题,导致模型训练无法正常进行。我们为此想了三个解决方法:1、找出所有点云数据中最小的N,对于大于N的点云数据,仅取前N行的数据输入训练。2、找到所有点云数据中最大的N,对于小于N的点云进行补0操作,将所有数据固定为最大的N后,输入网络进行训练。3、找到所有点云数据中最大的N,对小于N的点云数据,从原数据中选择一个点云进行填补至行数为N,再将数据输入网络进行训练。该三种方法均成功解决了NPU上的动态shape问题,但是第一种方法删除了样本点,因此导致最后训练出的模型精度很差;第二种方法虽然并没有丢失样本信息,但是向数据中填入大量的0,改变了本来的代码逻辑,导致最后训练出的模型精度也并不高。对于第三种方法,即没有丢失样本信息,对每个点云数据中的某一个点云样本点进行重复操作,没有改变原始的代码逻辑,最后也获得了不错的精度表现。
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+ ```
+ git clone {repository_url} # 克隆仓库的代码
+ cd {repository_name} # 切换到模型的代码仓目录
+ git checkout {branch} # 切换到对应分支
+ git reset --hard {commit_id} # 代码设置到对应的commit_id
+ cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+ ```
+
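+针对上文“相关迁移的工作”中最终采用的方法3(把每个点云通过重复已有样本点补齐到固定行数N),下面给出一个最小示意(假设性代码,非本仓库实现):
+
+```python
+import numpy as np
+
+def pad_point_cloud(pts, target_n):
+    """把形状为(M, 3)的点云补齐为(target_n, 3),不足的行用原点云中已有的点重复填充。"""
+    assert pts.shape[0] <= target_n
+    if pts.shape[0] == target_n:
+        return pts
+    # 简单起见重复第一个点;重复任意已有点都不会引入新的几何信息
+    pad = np.repeat(pts[:1], target_n - pts.shape[0], axis=0)
+    return np.vstack([pts, pad])
+
+# 用法示意:target_n取所有点云中最大的N
+# padded = [pad_point_cloud(np.loadtxt(f), max_n) for f in point_files]
+```
+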
+## 默认配置
+
+- 数据集获取:obs://cann-id2019/dataset/
+
+- 训练超参
+
+ - log_step_count=200
+ - save_summaries_steps=25000
+ - train_steps=2600000
+ - save_checkpoints_steps=100000
+ - eval_examples=39900
+
+
+
+训练环境准备
+
+1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南]( https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。
+2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
+
+ 当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
+
+ **表 1** 镜像列表
+
+
+| 镜像名称 | 镜像版本 | 配套CANN版本 |
+| -------- | -------- | ------------ |
+| [ascend-tensorflow-arm](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm) | 20.2.0 | 20.2 |
+
+
+
+
+
+
+## 脚本参数
+
+```
+gen_pt_test_data_gpu.py 中的参数
+
+--input_test_files 传入需要进行旋转的点云数据集
+--output_directory 存储旋转后点云集的路径
+--random_rotation_axang 布尔型,若为真,则对传入的数据集采用指定方法旋转。默认为真
+--num_rotations_per_file 每个测试点云的随机旋转增加数。默认为100
+
+
+main_point_cloud_gpu.py 中的参数
+
+--pt_cloud_test_files 测试数据集路径
+--pt_cloud_train_files 训练数据集路径
+--method 指定用于预测旋转的方式。选项为"svd", "svd-inf", or "gs"。默认为“svd”
+--checkpoint_dir 训练模型的存放位置
+--train_steps 训练迭代的次数。默认为2600000
+--save_checkpoints_steps 保存检查点的频率。默认为10000
+--log_step_count 日志记录一次的步数。默认为200
+--save_summaries_steps 保存一次summary的步数。默认为5000
+--learning_rate 默认为1e-5
+--lr_decay 如果为真,则衰减learning rate。默认为假
+--lr_decay_steps learning rate衰减步数。默认为35000
+--lr_decay_rate learning rate衰减速率。默认为0.95
+--predict_all_test 如果为真,则在最新的检查点上运行eval作业,并打印每个输入的误差信息。默认为假
+--eval_examples 测试样本的数量。默认为0
+--print_variable_names 打印模型变量名。默认为假
+--num_train_augmentations 增加每个输入点云的随机旋转数。默认为10
+
+```
+
+
+
+## 运行
+
+GPU运行命令如下:
+
+**生成测试数据**
+
+注:生成的文件points_test_modified、points已包含在dataset文件夹中。
+```bash
+# 将路径设置到测试点云图文件
+IN_FILES=/shapenet/data/pc_plane/points_test/*.pts
+
+# 设置新生成文件的路径
+NEW_TEST_FILES_DIR=/shapenet/data/pc_plane/points_test_modified
+
+# 决定旋转轴角的分布
+AXANG_SAMPLING=True
+
+python -m special_orthogonalization.gen_pt_test_data_gpu --input_test_files=$IN_FILES --output_directory=$NEW_TEST_FILES_DIR --random_rotation_axang=$AXANG_SAMPLING
+```
+
+**训练与评价**
+```bash
+# 将路径设置到原始训练数据
+TRAIN_FILES=/shapenet/data/pc_plane/points/*.pts
+
+#将路径设置到旋转后的训练数据
+TEST_FILES=$NEW_TEST_FILES_DIR/*.pts
+
+# 指定旋转预测方式
+METHOD=svd
+
+# 指定ckpt、summaries、评价结果等的存储路径
+OUT_DIR=/path/to/model
+
+python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --log_step_count=200 --save_summaries_steps=25000 --pt_cloud_train_files=$TRAIN_FILES --pt_cloud_test_files=$TEST_FILES --train_steps=2600000 --save_checkpoints_steps=100000 --eval_examples=39900
+```
+
+**从所有训练样本中生成统计数据**
+```bash
+# 打印均值、中位数、标准差和分位数
+python -m special_orthogonalization.main_point_cloud_gpu --method=$METHOD --checkpoint_dir=$OUT_DIR --pt_cloud_test_files=$TEST_FILES --predict_all_test=True
+```
+## 运行
+
+NPU运行命令方式如下:
+
+对于全部三个步骤来说,ModelArts插件中OBS桶的数据路径均要写到真正包含数据的那一级目录。
+例如,pc_plane文件夹中含有points、points_test等存放数据的子文件夹,
+则ModelArts插件中的数据路径应写为 obs://cann-id2019/data/pc_plane/
+
+**生成测试数据**
+
+运行这一步我们需要的程序文件为gen_pt_test_data.py、modelarts_entry_Axang.py、genTestData.sh
+这三个文件中的代码均不需要修改
+最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
+为进行第二步模型训练,需要将生成旋转后的文件转移至obs桶中存放data的路径
+
+注:需要确保的是存在obs桶里的data文件名为points_test
+
+**训练与评价**
+
+运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_acc_train.py、train_full_1p.sh
+这三个文件中的代码均不需要修改
+最后生成的旋转后的数据文件存放在obs桶当次程序文件的output路径中,文件名为test_points_modified,
+为进行第三步,需要将生成的output文件转移至obs桶中存放data的路径
+
+注意:该次训练的模型保存在该次的obs文件夹中,进行第三步时又需要重启一次新的modelarts,因此我们需要将output文件中的
+checkpoint文件中最新模型的路径修改
+"/home/ma-user/modelarts/inputs/data_url_0/output"
+这样第三步才能跑出正确的精度指标
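+
+例如,修改后的checkpoint文件内容大致如下(其中检查点步数2600000仅为示意,应以实际保存的最新模型文件为准):
+
+```
+model_checkpoint_path: "/home/ma-user/modelarts/inputs/data_url_0/output/model.ckpt-2600000"
+all_model_checkpoint_paths: "/home/ma-user/modelarts/inputs/data_url_0/output/model.ckpt-2600000"
+```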
+
+**从所有训练样本中生成统计数据**
+
+运行这一步我们需要的程序文件为main_point_cloud.py、modelarts_entry_stat.py、genStatistical.sh
+这三个文件的代码均不需要修改
+运行成功后将会在屏幕上打印出关于精度相应的统计量值
+
+注意:由于加入了混合精度提高训练性能,导致该步出现未知错误,输出的误差均为0,因此建议将训练后的结果下载到本地
+修改checkpoint文件中模型的路径后,利用本地的GPU进行运行,实际上该步骤在运行时NPU成功启动但也未曾利用。
+
+## 训练结果
+**精度对比:**
+
+
+
+| 测地线误差(°) | 论文发布 | GPU(初始代码未改动版本) | GPU实测|NPU实测 |
+| ------------------------ | ------- | ----- | --------- |----------|
+| 平均值 | 1.63 | 2.58 | 3.98 | 2.92 |
+| 中值 | 0.89 | 1.68 | 2.6 | 1.7 |
+| 标准差 | 6.70 | 6.93 | 9.36 | 8.45 |
+
+相比于论文中的精度,NPU迁移后的实测结果差距依然较大,但与未改动任何代码的初始版本在GPU上实测的精度相差较小;
+且在代码改动相同的前提下,NPU上的精度优于GPU上的精度。需要注意的是,在NPU上运行时我们采用了混合精度来提升训练
+性能,但其中产生了未知错误,导致精度指标mean_degree_err在整个训练过程中始终为0,因此我们无法得知NPU训练
+260万步过程中精度指标的下降情况。值得庆幸的是,用NPU训练出的模型能够在GPU上计算出精度,并且精度表现不错。
+
+**性能对比:**
+
+取华为v100上GPU运行的前2w步的global_step/sec平均值和NPU运行的前2w步的global_step/sec平均值进行对比,以达到性能对比的目的。
+
+| 性能指标项 | 论文发布 | GPU实测 | NPU实测 |
+| ------------------- | ------- | ------ | ------ |
+| global_step/sec| 无 | 87.64 | 101.24 |
+
+
--
Gitee
From 4656d8ab0fbafd5db83906dfeeb2d4fa02bcd75f Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 13:57:31 +0000
Subject: [PATCH 21/88] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow/contrib/cv/SVD=5FID2019=5Ffor=5FTensorflow/modelzoo=5F?=
=?UTF-8?q?level.txt?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt | 3 ---
1 file changed, 3 deletions(-)
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt
deleted file mode 100644
index d648fdf46..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/modelzoo_level.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-FuncStatus:NOK
-PerfStatus:NOK
-PrecisionStatus:NOK
\ No newline at end of file
--
Gitee
From abdf3676e549b37d4793167c107e6390675f9d73 Mon Sep 17 00:00:00 2001
From: ab_dx_z <10158481+ab_dx_z@user.noreply.gitee.com>
Date: Sat, 7 May 2022 14:05:44 +0000
Subject: [PATCH 22/88] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow/contrib/cv/SVD=5FID2019=5Ffor=5FTensorflow/GPU?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SVD_ID2019_for_Tensorflow/GPU/.keep | 0
.../GPU/GPU\347\262\276\345\272\246.txt" | 106 -----
.../GPU/gen_pt_test_data_gpu.py | 124 -----
.../SVD_ID2019_for_Tensorflow/GPU/license.txt | 28 --
.../cv/SVD_ID2019_for_Tensorflow/GPU/loss.log | 246 ----------
.../GPU/main_point_cloud_gpu.py | 423 ------------------
.../GPU/requirements.txt | 35 --
.../GPU/utils_gpu.py | 144 ------
...2\346\226\207\347\262\276\345\272\246.jpg" | Bin 30086 -> 0 bytes
9 files changed, 1106 deletions(-)
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/.keep
delete mode 100644 "TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/GPU\347\262\276\345\272\246.txt"
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/gen_pt_test_data_gpu.py
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/license.txt
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/loss.log
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/main_point_cloud_gpu.py
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/requirements.txt
delete mode 100644 TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/utils_gpu.py
delete mode 100644 "TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/\350\256\272\346\226\207\347\262\276\345\272\246.jpg"
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/.keep b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/.keep
deleted file mode 100644
index e69de29bb..000000000
diff --git "a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/GPU\347\262\276\345\272\246.txt" "b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/GPU\347\262\276\345\272\246.txt"
deleted file mode 100644
index de31ae6b3..000000000
--- "a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/GPU\347\262\276\345\272\246.txt"
+++ /dev/null
@@ -1,106 +0,0 @@
-Evaluated 39900 examples
-Mean error: %f degrees 5.551389
-Median error: %f degrees 3.6492655
-Std: %f degrees 10.683777
-
-Percentiles:
- 1%: 0.703878
- 2%: 0.889114
- 3%: 1.011817
- 4%: 1.117676
- 5%: 1.203335
- 6%: 1.290285
- 7%: 1.362000
- 8%: 1.423815
- 9%: 1.484244
- 10%: 1.540022
- 11%: 1.593973
- 12%: 1.645681
- 13%: 1.698696
- 14%: 1.743833
- 15%: 1.792095
- 16%: 1.841431
- 17%: 1.893619
- 18%: 1.939569
- 19%: 1.985442
- 20%: 2.029315
- 21%: 2.077730
- 22%: 2.125596
- 23%: 2.174029
- 24%: 2.223344
- 25%: 2.272967
- 26%: 2.323889
- 27%: 2.370916
- 28%: 2.424628
- 29%: 2.477176
- 30%: 2.526464
- 31%: 2.579367
- 32%: 2.632026
- 33%: 2.682996
- 34%: 2.733374
- 35%: 2.784669
- 36%: 2.837522
- 37%: 2.893403
- 38%: 2.948823
- 39%: 3.002961
- 40%: 3.058895
- 41%: 3.115458
- 42%: 3.170274
- 43%: 3.233495
- 44%: 3.287655
- 45%: 3.344627
- 46%: 3.402717
- 47%: 3.463511
- 48%: 3.525312
- 49%: 3.588614
- 50%: 3.649158
- 51%: 3.710984
- 52%: 3.777194
- 53%: 3.840634
- 54%: 3.912314
- 55%: 3.978772
- 56%: 4.050912
- 57%: 4.124833
- 58%: 4.205145
- 59%: 4.269850
- 60%: 4.350731
- 61%: 4.429609
- 62%: 4.506891
- 63%: 4.590645
- 64%: 4.673866
- 65%: 4.769208
- 66%: 4.860751
- 67%: 4.957645
- 68%: 5.045778
- 69%: 5.145709
- 70%: 5.248520
- 71%: 5.356171
- 72%: 5.463465
- 73%: 5.576540
- 74%: 5.700179
- 75%: 5.826519
- 76%: 5.954271
- 77%: 6.075412
- 78%: 6.217035
- 79%: 6.359586
- 80%: 6.509334
- 81%: 6.681930
- 82%: 6.855579
- 83%: 7.045702
- 84%: 7.237360
- 85%: 7.455194
- 86%: 7.683445
- 87%: 7.928701
- 88%: 8.226621
- 89%: 8.539159
- 90%: 8.927838
- 91%: 9.329886
- 92%: 9.826632
- 93%: 10.504982
- 94%: 11.362451
- 95%: 12.505600
- 96%: 14.257383
- 97%: 17.200018
- 98%: 22.386629
- 99%: 37.106316
-100%: 179.459671
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/gen_pt_test_data_gpu.py b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/gen_pt_test_data_gpu.py
deleted file mode 100644
index ccef0b62f..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/gen_pt_test_data_gpu.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-# coding=utf-8
-# Copyright 2021 The Google Research Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Generate modified point cloud test data with fixed random rotations."""
-import glob
-import os
-import pathlib
-
-import utils_gpu
-from absl import app
-from absl import flags
-import numpy as np
-from scipy.stats import special_ortho_group
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('input_test_files1', '',
- 'Regular expression for the original input points files.')
-flags.DEFINE_string('input_test_files2', '',
- 'Regular expression for the original input test files.')
-flags.DEFINE_string('output_directory1', '',
- 'Output directory where new points files will be stored.')
-flags.DEFINE_string('output_directory2', '',
- 'Output directory where new test files will be stored.')
-flags.DEFINE_integer('num_rotations_per_file', 100,
- 'Number of random rotation augmentations per test point '
- 'cloud.')
-flags.DEFINE_boolean('random_rotation_axang', True,
- 'If true, samples random rotations using the method '
- 'from the original benchmark code. Otherwise samples '
- 'by Haar measure.')
-
-def gen_test_data():
- """Generate the new (modified) test data."""
- # Create output directory.
- os.makedirs(FLAGS.output_directory1, exist_ok=True)
- os.makedirs(FLAGS.output_directory2, exist_ok=True)
-
- # Get all test point cloud files in the original dataset.
- input_test_files1 = glob.glob(FLAGS.input_test_files1)
- input_test_files2 = glob.glob(FLAGS.input_test_files2)
-
- n = 1
- for in_file in input_test_files1:
- pts = np.loadtxt(in_file)
- m = pts.shape[0]
- if n < m:
- n = m
- print("over")
-
- for in_file in input_test_files1:
- out_file_prefix = pathlib.Path(in_file).stem
- pts = np.loadtxt(in_file) # N x 3
- data1 = np.random.rand(n-pts.shape[0], 3)
- pts = np.vstack((pts, data1))
- out_file1 = os.path.join(
- FLAGS.output_directory1, '%s.pts' % (out_file_prefix))
- np.savetxt(out_file1, pts)
- print("over")
-
- for in_file in input_test_files2:
- out_file_prefix = pathlib.Path(in_file).stem
- pts = np.loadtxt(in_file) # N x 3
- num_pts_to_keep = pts.shape[0] // 2
- pts = pts[:num_pts_to_keep, :] # N//2 x 3.
-
- for k in range(FLAGS.num_rotations_per_file):
- if FLAGS.random_rotation_axang:
- r = utils_gpu.random_rotation_benchmark_np(1)
- r = r[0]
- else:
- r = special_ortho_group.rvs(3)
- joined = np.float32(np.concatenate((r, pts), axis=0)) # (N//2+3) x 3.
- out_file = os.path.join(
- FLAGS.output_directory2, '%s_r%03d.pts' % (out_file_prefix, k))
- np.savetxt(out_file, joined)
- print("over")
-
-
-def main(unused_argv):
- gen_test_data()
-
-if __name__ == '__main__':
- app.run(main)
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/license.txt b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/license.txt
deleted file mode 100644
index 0384a2533..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/license.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/loss.log b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/loss.log
deleted file mode 100644
index dc3dfb67d..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/loss.log
+++ /dev/null
@@ -1,246 +0,0 @@
-2022-03-19 18:03:26,273 - tensorflow - INFO - Using config: {'_model_dir': '/root/RotationContinuity-master2/shapenet/data/pc_plane/to/model1', '_tf_random_seed': None, '_save_summary_steps': 25, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
-graph_options {
- rewrite_options {
- meta_optimizer_iterations: ONE
- }
-}
-, '_keep_checkpoint_max': None, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 200, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
-2022-03-19 18:03:26,273 - tensorflow - INFO - Not using Distribute Coordinator.
-2022-03-19 18:03:26,273 - tensorflow - INFO - Running training and evaluation locally (non-distributed).
-2022-03-19 18:03:26,274 - tensorflow - INFO - Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 100 or save_checkpoints_secs None.
-2022-03-19 18:03:26,277 - tensorflow - INFO - Skipping training since max_steps has already saved.
-2022-03-19 18:04:19,049 - tensorflow - INFO - Using config: {'_model_dir': '/root/RotationContinuity-master2/shapenet/data/pc_plane/to/model1', '_tf_random_seed': None, '_save_summary_steps': 25, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
-graph_options {
- rewrite_options {
- meta_optimizer_iterations: ONE
- }
-}
-, '_keep_checkpoint_max': None, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 200, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
-2022-03-19 18:04:19,050 - tensorflow - INFO - Not using Distribute Coordinator.
-2022-03-19 18:04:19,050 - tensorflow - INFO - Running training and evaluation locally (non-distributed).
-2022-03-19 18:04:19,050 - tensorflow - INFO - Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 100 or save_checkpoints_secs None.
-2022-03-19 18:04:19,054 - tensorflow - INFO - Skipping training since max_steps has already saved.
-2022-03-19 18:04:49,280 - tensorflow - INFO - Using config: {'_model_dir': '/root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2', '_tf_random_seed': None, '_save_summary_steps': 25, '_save_checkpoints_steps': 100, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
-graph_options {
- rewrite_options {
- meta_optimizer_iterations: ONE
- }
-}
-, '_keep_checkpoint_max': None, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 200, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
-2022-03-19 18:04:49,281 - tensorflow - INFO - Not using Distribute Coordinator.
-2022-03-19 18:04:49,281 - tensorflow - INFO - Running training and evaluation locally (non-distributed).
-2022-03-19 18:04:49,281 - tensorflow - INFO - Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 100 or save_checkpoints_secs None.
-2022-03-19 18:04:49,286 - tensorflow - WARNING - From /root/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/training/training_util.py:236: Variable.initialized_value (from tensorflow.python.ops.variables) is deprecated and will be removed in a future version.
-Instructions for updating:
-Use Variable.read_value. Variables in 2.X are initialized automatically both in eager and graph (inside tf.defun) contexts.
-2022-03-19 18:04:49,292 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:244: The name tf.gfile.Glob is deprecated. Please use tf.io.gfile.glob instead.
-
-2022-03-19 18:04:49,683 - tensorflow - WARNING - From /root/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/autograph/converters/directives.py:119: The name tf.read_file is deprecated. Please use tf.io.read_file instead.
-
-2022-03-19 18:04:49,683 - tensorflow - WARNING - From /root/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/autograph/converters/directives.py:119: The name tf.decode_csv is deprecated. Please use tf.io.decode_csv instead.
-
-2022-03-19 18:04:49,766 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:223: calling string_split (from tensorflow.python.ops.ragged.ragged_string_ops) with delimiter is deprecated and will be removed in a future version.
-Instructions for updating:
-delimiter is deprecated, please use sep instead.
-2022-03-19 18:04:49,932 - tensorflow - WARNING - From /root/special_orthogonalization2/utils_gpu.py:75: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.
-Instructions for updating:
-tf.py_func is deprecated in TF V2. Instead, there are two
- options available in V2.
- - tf.py_function takes a python function which manipulates tf eager
- tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
- an ndarray (just call tensor.numpy()) but having access to eager tensors
- means `tf.py_function`s can use accelerators such as GPUs as well as
- being differentiable using a gradient tape.
- - tf.numpy_function maintains the semantics of the deprecated tf.py_func
- (it is not differentiable, and manipulates numpy arrays). It drops the
- stateful argument making all functions stateful.
-
-2022-03-19 18:04:49,938 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:252: The name tf.data.make_one_shot_iterator is deprecated. Please use tf.compat.v1.data.make_one_shot_iterator instead.
-
-2022-03-19 18:04:49,961 - tensorflow - INFO - Calling model_fn.
-2022-03-19 18:04:49,966 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:74: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.
-
-2022-03-19 18:04:49,966 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:74: The name tf.AUTO_REUSE is deprecated. Please use tf.compat.v1.AUTO_REUSE instead.
-
-2022-03-19 18:04:49,966 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:75: conv1d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.
-Instructions for updating:
-Use `tf.keras.layers.Conv1D` instead.
-2022-03-19 18:04:49,967 - tensorflow - WARNING - From /root/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/layers/convolutional.py:218: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
-Instructions for updating:
-Please use `layer.__call__` method instead.
-2022-03-19 18:04:50,011 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:81: calling reduce_max_v1 (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.
-Instructions for updating:
-keep_dims is deprecated, use keepdims instead
-2022-03-19 18:04:50,033 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:99: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.
-Instructions for updating:
-Use keras.layers.Dense instead.
-2022-03-19 18:04:50,055 - tensorflow - WARNING - From /root/special_orthogonalization2/utils_gpu.py:97: The name tf.svd is deprecated. Please use tf.linalg.svd instead.
-
-2022-03-19 18:04:50,079 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:171: The name tf.summary.scalar is deprecated. Please use tf.compat.v1.summary.scalar instead.
-
-2022-03-19 18:04:50,082 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:173: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.
-
-2022-03-19 18:04:50,083 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:185: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.
-
-2022-03-19 18:04:50,083 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:186: The name tf.get_collection is deprecated. Please use tf.compat.v1.get_collection instead.
-
-2022-03-19 18:04:50,083 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:186: The name tf.GraphKeys is deprecated. Please use tf.compat.v1.GraphKeys instead.
-
-2022-03-19 18:04:50,323 - tensorflow - INFO - Done calling model_fn.
-2022-03-19 18:04:50,325 - tensorflow - INFO - Create CheckpointSaverHook.
-2022-03-19 18:04:50,418 - tensorflow - WARNING - From /root/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py:1475: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
-Instructions for updating:
-Use tf.where in 2.0, which has the same broadcast rule as np.where
-2022-03-19 18:04:50,485 - tensorflow - INFO - Graph was finalized.
-2022-03-19 18:04:52,089 - tensorflow - INFO - Running local_init_op.
-2022-03-19 18:04:52,106 - tensorflow - INFO - Done running local_init_op.
-2022-03-19 18:04:52,544 - tensorflow - INFO - Saving checkpoints for 0 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:04:52,608 - tensorflow - INFO - model.ckpt-0 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:04:55,512 - tensorflow - INFO - loss = 32.202156, step = 0
-2022-03-19 18:04:57,008 - tensorflow - INFO - Saving checkpoints for 100 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:04:57,026 - tensorflow - INFO - model.ckpt-100 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:04:57,484 - tensorflow - INFO - Calling model_fn.
-2022-03-19 18:04:57,596 - tensorflow - WARNING - From /root/special_orthogonalization2/main_point_cloud_gpu.py:203: The name tf.metrics.mean is deprecated. Please use tf.compat.v1.metrics.mean instead.
-
-2022-03-19 18:04:57,606 - tensorflow - INFO - Done calling model_fn.
-2022-03-19 18:04:57,621 - tensorflow - INFO - Starting evaluation at 2022-03-19T18:04:57Z
-2022-03-19 18:04:57,684 - tensorflow - INFO - Graph was finalized.
-2022-03-19 18:04:57,687 - tensorflow - INFO - Restoring parameters from /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt-100
-2022-03-19 18:04:57,883 - tensorflow - INFO - Running local_init_op.
-2022-03-19 18:04:57,949 - tensorflow - INFO - Done running local_init_op.
-2022-03-19 18:04:59,509 - tensorflow - INFO - Evaluation [39/399]
-2022-03-19 18:05:00,470 - tensorflow - INFO - Evaluation [78/399]
-2022-03-19 18:05:00,986 - tensorflow - INFO - Evaluation [117/399]
-2022-03-19 18:05:01,473 - tensorflow - INFO - Evaluation [156/399]
-2022-03-19 18:05:01,813 - tensorflow - INFO - Evaluation [195/399]
-2022-03-19 18:05:02,217 - tensorflow - INFO - Evaluation [234/399]
-2022-03-19 18:05:02,537 - tensorflow - INFO - Evaluation [273/399]
-2022-03-19 18:05:02,756 - tensorflow - INFO - Evaluation [312/399]
-2022-03-19 18:05:02,876 - tensorflow - INFO - Evaluation [351/399]
-2022-03-19 18:05:03,157 - tensorflow - INFO - Evaluation [390/399]
-2022-03-19 18:05:03,209 - tensorflow - INFO - Evaluation [399/399]
-2022-03-19 18:05:03,291 - tensorflow - INFO - Finished evaluation at 2022-03-19-18:05:03
-2022-03-19 18:05:03,292 - tensorflow - INFO - Saving dict for global step 100: global_step = 100, loss = 2.0213404, mean_degree_err = 90.8583
-2022-03-19 18:05:03,399 - tensorflow - INFO - Saving 'checkpoint_path' summary for global step 100: /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt-100
-2022-03-19 18:05:04,530 - tensorflow - INFO - Saving checkpoints for 200 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:04,547 - tensorflow - INFO - model.ckpt-200 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:04,586 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:04,600 - tensorflow - INFO - global_step/sec: 22.0056
-2022-03-19 18:05:04,601 - tensorflow - INFO - loss = 18.108334, step = 200 (9.089 sec)
-2022-03-19 18:05:05,711 - tensorflow - INFO - Saving checkpoints for 300 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:05,728 - tensorflow - INFO - model.ckpt-300 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:05,765 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:06,893 - tensorflow - INFO - Saving checkpoints for 400 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:06,911 - tensorflow - INFO - model.ckpt-400 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:06,948 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:06,962 - tensorflow - INFO - global_step/sec: 84.6573
-2022-03-19 18:05:06,963 - tensorflow - INFO - loss = 19.658773, step = 400 (2.362 sec)
-2022-03-19 18:05:08,073 - tensorflow - INFO - Saving checkpoints for 500 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:08,089 - tensorflow - INFO - model.ckpt-500 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:08,126 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:09,261 - tensorflow - INFO - Saving checkpoints for 600 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:09,276 - tensorflow - INFO - model.ckpt-600 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:09,314 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:09,329 - tensorflow - INFO - global_step/sec: 84.4893
-2022-03-19 18:05:09,330 - tensorflow - INFO - loss = 15.302707, step = 600 (2.367 sec)
-2022-03-19 18:05:10,443 - tensorflow - INFO - Saving checkpoints for 700 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:10,461 - tensorflow - INFO - model.ckpt-700 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:10,498 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:11,624 - tensorflow - INFO - Saving checkpoints for 800 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:11,643 - tensorflow - INFO - model.ckpt-800 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:11,680 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:11,695 - tensorflow - INFO - global_step/sec: 84.5516
-2022-03-19 18:05:11,696 - tensorflow - INFO - loss = 18.000507, step = 800 (2.365 sec)
-2022-03-19 18:05:12,807 - tensorflow - INFO - Saving checkpoints for 900 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:12,822 - tensorflow - INFO - model.ckpt-900 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:12,860 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:13,986 - tensorflow - INFO - Saving checkpoints for 1000 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:14,002 - tensorflow - INFO - model.ckpt-1000 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:14,039 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:14,054 - tensorflow - INFO - global_step/sec: 84.7931
-2022-03-19 18:05:14,054 - tensorflow - INFO - loss = 20.527103, step = 1000 (2.359 sec)
-2022-03-19 18:05:15,176 - tensorflow - INFO - Saving checkpoints for 1100 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:15,192 - tensorflow - INFO - model.ckpt-1100 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:15,229 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:16,360 - tensorflow - INFO - Saving checkpoints for 1200 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:16,378 - tensorflow - INFO - model.ckpt-1200 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:16,415 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:16,430 - tensorflow - INFO - global_step/sec: 84.1547
-2022-03-19 18:05:16,431 - tensorflow - INFO - loss = 16.00374, step = 1200 (2.377 sec)
-2022-03-19 18:05:17,536 - tensorflow - INFO - Saving checkpoints for 1300 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:17,554 - tensorflow - INFO - model.ckpt-1300 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:17,591 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:18,715 - tensorflow - INFO - Saving checkpoints for 1400 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:18,731 - tensorflow - INFO - model.ckpt-1400 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:18,768 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:18,782 - tensorflow - INFO - global_step/sec: 85.0196
-2022-03-19 18:05:18,783 - tensorflow - INFO - loss = 8.673609, step = 1400 (2.352 sec)
-2022-03-19 18:05:19,894 - tensorflow - INFO - Saving checkpoints for 1500 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:19,909 - tensorflow - INFO - model.ckpt-1500 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:19,947 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:21,070 - tensorflow - INFO - Saving checkpoints for 1600 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:21,092 - tensorflow - INFO - model.ckpt-1600 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:21,129 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:21,144 - tensorflow - INFO - global_step/sec: 84.7018
-2022-03-19 18:05:21,144 - tensorflow - INFO - loss = 12.576811, step = 1600 (2.361 sec)
-2022-03-19 18:05:22,260 - tensorflow - INFO - Saving checkpoints for 1700 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:22,276 - tensorflow - INFO - model.ckpt-1700 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:22,313 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:23,442 - tensorflow - INFO - Saving checkpoints for 1800 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:23,458 - tensorflow - INFO - model.ckpt-1800 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:23,496 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:23,510 - tensorflow - INFO - global_step/sec: 84.5082
-2022-03-19 18:05:23,511 - tensorflow - INFO - loss = 25.950819, step = 1800 (2.367 sec)
-2022-03-19 18:05:24,627 - tensorflow - INFO - Saving checkpoints for 1900 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:24,642 - tensorflow - INFO - model.ckpt-1900 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:24,679 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:25,810 - tensorflow - INFO - Saving checkpoints for 2000 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:25,825 - tensorflow - INFO - model.ckpt-2000 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:25,863 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:25,877 - tensorflow - INFO - global_step/sec: 84.5026
-2022-03-19 18:05:25,878 - tensorflow - INFO - loss = 10.249855, step = 2000 (2.367 sec)
-2022-03-19 18:05:26,991 - tensorflow - INFO - Saving checkpoints for 2100 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:27,007 - tensorflow - INFO - model.ckpt-2100 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:27,044 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:28,175 - tensorflow - INFO - Saving checkpoints for 2200 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:28,190 - tensorflow - INFO - model.ckpt-2200 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:28,227 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:28,241 - tensorflow - INFO - global_step/sec: 84.5981
-2022-03-19 18:05:28,242 - tensorflow - INFO - loss = 10.412505, step = 2200 (2.364 sec)
-2022-03-19 18:05:29,344 - tensorflow - INFO - Saving checkpoints for 2300 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:29,360 - tensorflow - INFO - model.ckpt-2300 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:29,397 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:30,505 - tensorflow - INFO - Saving checkpoints for 2400 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:30,520 - tensorflow - INFO - model.ckpt-2400 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:30,558 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:30,572 - tensorflow - INFO - global_step/sec: 85.8075
-2022-03-19 18:05:30,573 - tensorflow - INFO - loss = 24.763023, step = 2400 (2.331 sec)
-2022-03-19 18:05:31,663 - tensorflow - INFO - Saving checkpoints for 2500 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:31,679 - tensorflow - INFO - model.ckpt-2500 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:31,716 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:32,818 - tensorflow - INFO - Saving checkpoints for 2600 into /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt.
-2022-03-19 18:05:32,834 - tensorflow - INFO - model.ckpt-2600 is not in all_model_checkpoint_paths. Manually adding it.
-2022-03-19 18:05:32,871 - tensorflow - INFO - Skip the current checkpoint eval due to throttle secs (60 secs).
-2022-03-19 18:05:33,174 - tensorflow - INFO - Calling model_fn.
-2022-03-19 18:05:33,301 - tensorflow - INFO - Done calling model_fn.
-2022-03-19 18:05:33,316 - tensorflow - INFO - Starting evaluation at 2022-03-19T18:05:33Z
-2022-03-19 18:05:33,379 - tensorflow - INFO - Graph was finalized.
-2022-03-19 18:05:33,382 - tensorflow - INFO - Restoring parameters from /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt-2600
-2022-03-19 18:05:33,562 - tensorflow - INFO - Running local_init_op.
-2022-03-19 18:05:33,627 - tensorflow - INFO - Done running local_init_op.
-2022-03-19 18:05:34,142 - tensorflow - INFO - Evaluation [39/399]
-2022-03-19 18:05:34,232 - tensorflow - INFO - Evaluation [78/399]
-2022-03-19 18:05:34,318 - tensorflow - INFO - Evaluation [117/399]
-2022-03-19 18:05:34,406 - tensorflow - INFO - Evaluation [156/399]
-2022-03-19 18:05:34,493 - tensorflow - INFO - Evaluation [195/399]
-2022-03-19 18:05:34,581 - tensorflow - INFO - Evaluation [234/399]
-2022-03-19 18:05:34,669 - tensorflow - INFO - Evaluation [273/399]
-2022-03-19 18:05:34,757 - tensorflow - INFO - Evaluation [312/399]
-2022-03-19 18:05:34,844 - tensorflow - INFO - Evaluation [351/399]
-2022-03-19 18:05:34,930 - tensorflow - INFO - Evaluation [390/399]
-2022-03-19 18:05:34,951 - tensorflow - INFO - Evaluation [399/399]
-2022-03-19 18:05:35,034 - tensorflow - INFO - Finished evaluation at 2022-03-19-18:05:35
-2022-03-19 18:05:35,035 - tensorflow - INFO - Saving dict for global step 2600: global_step = 2600, loss = 1.1534922, mean_degree_err = 59.039207
-2022-03-19 18:05:35,035 - tensorflow - INFO - Saving 'checkpoint_path' summary for global step 2600: /root/RotationContinuity-master2/shapenet/data/pc_plane/to/model2/model.ckpt-2600
-2022-03-19 18:05:35,035 - tensorflow - DEBUG - Calling exporter with the `is_the_final_export=True`.
-2022-03-19 18:05:35,053 - tensorflow - INFO - Loss for final step: 4.256054.
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/main_point_cloud_gpu.py b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/main_point_cloud_gpu.py
deleted file mode 100644
index a27b51b95..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/main_point_cloud_gpu.py
+++ /dev/null
@@ -1,423 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-# coding=utf-8
-# Copyright 2021 The Google Research Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Training and evaluation for the point cloud alignment experiment."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import utils_gpu
-import time
-from absl import flags
-import numpy as np
-import tensorflow as tf
-import logging
-FLAGS = flags.FLAGS
-log = logging.getLogger('tensorflow')
-log.setLevel(logging.DEBUG)
-
-# General flags.
-flags.DEFINE_string('method', 'svd',
- 'Specifies the method to use for predicting rotations. '
- 'Choices are "svd", "svd-inf", or "gs".')
-flags.DEFINE_string('checkpoint_dir', '',
- 'Locations for checkpoints, summaries, etc.')
-flags.DEFINE_integer('train_steps', 2600000, 'Number of training iterations.')
-flags.DEFINE_integer('save_checkpoints_steps', 10000,
- 'How often to save checkpoints')
-flags.DEFINE_integer('log_step_count', 500, 'How often to log the step count')
-flags.DEFINE_integer('save_summaries_steps', 5000,
- 'How often to save summaries.')
-flags.DEFINE_float('learning_rate', 1e-5, 'Learning rate')
-flags.DEFINE_boolean('lr_decay', False, 'Decay the learning rate if True.')
-flags.DEFINE_integer('lr_decay_steps', 35000,
- 'Learning rate decay steps.')
-flags.DEFINE_float('lr_decay_rate', 0.95,
- 'Learning rate decay rate.')
-flags.DEFINE_boolean('predict_all_test', False,
- 'If true, runs an eval job on latest checkpoint and '
- 'prints the error for each input.')
-flags.DEFINE_integer('eval_examples', 0, 'Number of test examples.')
-flags.DEFINE_boolean('print_variable_names', False,
- 'Print model variable names.')
-
-# Flags only used in the point cloud alignment experiment.
-flags.DEFINE_integer('num_train_augmentations', 10,
- 'Number of random rotations for augmenting each input '
- 'point cloud.')
-flags.DEFINE_string('pt_cloud_train_files', '',
- 'Expression matching all training point files, e.g. '
- '/path/to/files/pc_plane/points/*.pts')
-flags.DEFINE_string('pt_cloud_test_files', '',
- 'Expression matching all modified test point files, e.g. '
- '/path/to/files/pc_plane/points_test/*.pts')
-flags.DEFINE_boolean('random_rotation_axang', True,
- 'If true, samples random rotations using the method '
- 'from the original benchmark code. Otherwise samples '
- 'by Haar measure.')
-
-
-def pt_features(batch_pts):
- """Input shape: [B, N, 3], output shape: [B, 1024]."""
- with tf.variable_scope('ptenc', reuse=tf.AUTO_REUSE):
- f1 = tf.layers.conv1d(inputs=batch_pts, filters=64, kernel_size=1)
- f1 = tf.nn.leaky_relu(f1)
- f2 = tf.layers.conv1d(inputs=f1, filters=128, kernel_size=1)
- f2 = tf.nn.leaky_relu(f2)
- f3 = tf.layers.conv1d(inputs=f2, filters=1024, kernel_size=1)
-
- f = tf.reduce_max(f3, axis=1, keep_dims=False)
- return f
-
-
-def regress_from_features(batch_features, out_dim):
- """Regress to a rotation representation from point cloud encodings.
-
- In Zhou et al, CVPR19, the paper describes this regression network as an MLP
- mapping 2048->512->512->out_dim, but the associated code implements it with
- one less layer: 2048->512->out_dim. We mimic the code.
-
- Args:
- batch_features: [batch_size, in_dim].
- out_dim: desired output dimensionality.
-
- Returns:
- A [batch_size, out_dim] tensor.
- """
- f1 = tf.layers.dense(batch_features, 512)
- f1 = tf.nn.leaky_relu(f1)
- f2 = tf.layers.dense(f1, out_dim)
- return f2
-
-
-def net_point_cloud(points1, points2, mode):
- """Predict a relative rotation given two point clouds.
-
- Args:
- points1: [batch_size, N, 3] float tensor.
- points2: [batch_size, N, 3] float tensor.
- mode: tf.estimator.ModeKeys.
-
- Returns:
- [batch_size, 3, 3] matrices.
- """
- f1 = pt_features(points1)
- f2 = pt_features(points2)
- f = tf.concat([f1, f2], axis=-1)
-
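- # Map the concatenated 2048-d feature to a rotation depending on --method:
- # 'svd' and 'svd-inf' regress 9 values and project them onto SO(3) via SVD
- # ('svd-inf' skips the projection during training), while 'gs' regresses 6
- # values and orthogonalizes them Gram-Schmidt style.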
- if FLAGS.method == 'svd':
- p = regress_from_features(f, 9)
- return utils_gpu.symmetric_orthogonalization(p)
-
- if FLAGS.method == 'svd-inf':
- p = regress_from_features(f, 9)
- if mode == tf.estimator.ModeKeys.TRAIN:
- return tf.reshape(p, (-1, 3, 3))
- else:
- return utils_gpu.symmetric_orthogonalization(p)
-
- if FLAGS.method == 'gs':
- p = regress_from_features(f, 6)
- return utils_gpu.gs_orthogonalization(p)
-
-
-def model_fn(features, labels, mode, params):
- """The model_fn used to construct the tf.Estimator."""
- del labels, params # Unused.
- if mode == tf.estimator.ModeKeys.TRAIN:
- # Training data has point cloud of size [1, N, 3] and random rotations
- # of size [1, FLAGS.num_train_augmentations, 3, 3]
- rot = features['rot'][0]
- num_rot = FLAGS.num_train_augmentations
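- # Tile the single [1, N, 3] cloud so each of the num_rot augmenting
- # rotations is applied to its own copy, giving an effective batch of num_rot.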
- batch_pts1 = tf.tile(features['data'], [num_rot, 1, 1])
- # In this experiment it does not matter if we pre or post-multiply the
- # rotation as long as we are consistent between training and eval.
- batch_pts2 = tf.matmul(batch_pts1, rot) # post-multiplying!
- else:
- # Test data has point cloud of size [1, N, 3] and a single random
- # rotation of size [1, 3, 3]
- batch_pts1 = features['data']
- rot = features['rot']
- batch_pts2 = tf.matmul(batch_pts1, rot)
- rot = tf.reshape(rot, (-1, 3, 3))
-
- # Predict the rotation.
- r = net_point_cloud(batch_pts1, batch_pts2, mode)
-
- # Compute the loss.
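- # tf.nn.l2_loss(x) is sum(x**2) / 2, so this is half the squared Frobenius
- # norm of (rot - r), summed over the batch.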
- loss = tf.nn.l2_loss(rot - r)
-
- # Compute the relative angle in radians.
- theta = utils_gpu.relative_angle(r, rot)
-
- # Mean angle error over the batch.
- mean_theta = tf.reduce_mean(theta)
- mean_theta_deg = mean_theta * 180.0 / np.pi
-
- # Train, eval, or predict depending on mode.
- if mode == tf.estimator.ModeKeys.TRAIN:
- tf.summary.scalar('train/loss', loss)
- tf.summary.scalar('train/theta', mean_theta_deg)
- global_step = tf.train.get_or_create_global_step()
-
- if FLAGS.lr_decay:
- learning_rate = tf.train.exponential_decay(
- FLAGS.learning_rate,
- global_step,
- FLAGS.lr_decay_steps,
- FLAGS.lr_decay_rate)
- else:
- learning_rate = FLAGS.learning_rate
-
- tf.summary.scalar('train/learning_rate', learning_rate)
- optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
- update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
- with tf.control_dependencies(update_ops):
- train_op = optimizer.minimize(loss, global_step=global_step)
- return tf.estimator.EstimatorSpec(
- mode=mode,
- loss=loss,
- train_op=train_op)
-
- if mode == tf.estimator.ModeKeys.EVAL:
- if FLAGS.predict_all_test:
- print_error_op = tf.print('error:', mean_theta_deg)
- with tf.control_dependencies([print_error_op]):
- eval_metric_ops = {
- 'mean_degree_err': tf.metrics.mean(mean_theta_deg),
- }
- else:
- eval_metric_ops = {
- 'mean_degree_err': tf.metrics.mean(mean_theta_deg),
- }
-
- return tf.estimator.EstimatorSpec(
- mode=mode,
- loss=loss,
- eval_metric_ops=eval_metric_ops)
-
- if mode == tf.estimator.ModeKeys.PREDICT:
- pred = {'error': mean_theta_deg}
- return tf.estimator.EstimatorSpec(
- mode=mode,
- predictions=pred)
-
-
-def train_input_fn():
- """Generate training data iterator from the .pts files."""
- def _file_to_matrix(pts_path):
- """Read Nx3 point cloud from a .pts file."""
- file_buffer = tf.read_file(pts_path)
- lines = tf.string_split([file_buffer], delimiter='\n')
- lines1 = tf.string_split(lines.values, delimiter='\r')
- values = tf.stack(tf.decode_csv(lines1.values,
- record_defaults=[[0.0], [0.0], [0.0]],
- field_delim=' '))
- values = tf.transpose(values)  # 3xN --> Nx3.
- # The experiment code in
- # github.com/papagina/RotationContinuity/.../shapenet/code/train_pointnet.py
- # only used the first half of the points in each file.
- return values[:(tf.shape(values)[0] // 2), :]
-
- def _random_rotation(pts):
- """Attach N random rotations to a point cloud."""
- if FLAGS.random_rotation_axang:
- rotations = utils_gpu.random_rotation_benchmark(FLAGS.num_train_augmentations)
- else:
- rotations = utils_gpu.random_rotation(FLAGS.num_train_augmentations)
- return pts, rotations
-
- pts_paths = tf.gfile.Glob(FLAGS.pt_cloud_train_files)
- dataset = tf.data.Dataset.from_tensor_slices(pts_paths)
- dataset = dataset.map(_file_to_matrix)
- dataset = dataset.cache() # Comment out if memory cannot hold all the data.
- dataset = dataset.shuffle(buffer_size=50, reshuffle_each_iteration=True)
- dataset = dataset.repeat()
- dataset = dataset.map(_random_rotation)
- dataset = dataset.batch(1)
- iterator = tf.data.make_one_shot_iterator(dataset)
- batch_data, batch_rot = iterator.get_next()
- features_dict = {'data': batch_data, 'rot': batch_rot}
- batch_size = tf.shape(batch_data)[0]
- batch_labels_dummy = tf.zeros(shape=(batch_size, 1))
- return (features_dict, batch_labels_dummy)
-
-
-def eval_input_fn():
- """Generate test data from *modified* .pts files.
-
- See README and comments below for details on how the data is modified.
-
- Returns:
- A tuple of features and associated labels.
- """
- def _file_to_matrix(pts_path):
- """Read Nx3 point cloud and 3x3 rotation matrix from a .pts file.
-
- The test data is a modified version of the original files. For each .pts
- file we have (1) added a 3x3 rotation matrix for testing, and (2) removed
- the second half of the point cloud since it is not used at all.
-
- Args:
- pts_path: path to a .pts file.
-
- Returns:
- A Nx3 point cloud.
- A 3x3 rotation matrix.
- """
- file_buffer = tf.read_file(pts_path)
- lines = tf.string_split([file_buffer], delimiter='\n')
- lines1 = tf.string_split(lines.values, delimiter='\r')
- values = tf.stack(tf.decode_csv(lines1.values,
- record_defaults=[[0.0], [0.0], [0.0]], field_delim=' '))
- values = tf.transpose(values) # 3xN --> Nx3.
- # First three rows are the rotation matrix, remaining rows the point cloud.
- rot = values[:3, :]
- return values[3:, :], rot
-
- pts_paths = tf.gfile.Glob(FLAGS.pt_cloud_test_files)
- dataset = tf.data.Dataset.from_tensor_slices(pts_paths)
- dataset = dataset.map(_file_to_matrix)
- dataset = dataset.batch(1)
- iterator = tf.data.make_one_shot_iterator(dataset)
- batch_data, batch_rot = iterator.get_next()
- features_dict = {'data': batch_data, 'rot': batch_rot}
- batch_size = tf.shape(batch_data)[0]
- batch_labels_dummy = tf.zeros(shape=(batch_size, 1))
- return (features_dict, batch_labels_dummy)
-
-
-def print_variable_names():
- """Print variable names in a model."""
- params = {'dummy': 0}
- estimator = tf.estimator.Estimator(
- model_fn=model_fn,
- model_dir=FLAGS.checkpoint_dir,
- params=params)
- names = estimator.get_variable_names()
- for name in names:
- print(name)
-
-
-def predict_all_test():
- """Print error statistics for the test dataset."""
- params = {'dummy': 0}
- estimator = tf.estimator.Estimator(
- model_fn=model_fn,
- model_dir=FLAGS.checkpoint_dir,
- params=params)
- evals = estimator.predict(input_fn=eval_input_fn, yield_single_examples=False)
-
- # Print error statistics.
- all_errors = [x['error'] for x in evals]
- errors = np.array(all_errors)
- print('Evaluated %d examples' % np.size(errors))
- print('Mean error: %f degrees' % np.mean(errors))
- print('Median error: %f degrees' % np.median(errors))
- print('Std: %f degrees' % np.std(errors))
- sorted_errors = np.sort(errors)
- n = np.size(sorted_errors)
- print('\nPercentiles:')
- for perc in range(1, 101):
- index = np.int32(np.float32(n * perc) / 100.0) - 1
- print('%3d%%: %f'%(perc, sorted_errors[index]))
-
-
-def train_and_eval():
- """Train and evaluate a model."""
- # create formatter and add it to the handlers
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
- # create file handler which logs even debug messages
- fh = logging.FileHandler('loss.log')
- fh.setLevel(logging.DEBUG)
- fh.setFormatter(formatter)
- log.addHandler(fh)
-
- save_summary_steps = FLAGS.save_summaries_steps
- save_checkpoints_steps = FLAGS.save_checkpoints_steps
- log_step_count = FLAGS.log_step_count
-
- config = tf.estimator.RunConfig(
- save_summary_steps=save_summary_steps,
- save_checkpoints_steps=save_checkpoints_steps,
- log_step_count_steps=log_step_count,
- keep_checkpoint_max=None)
-
- params = {'dummy': 0}
- estimator = tf.estimator.Estimator(
- model_fn=model_fn,
- model_dir=FLAGS.checkpoint_dir,
- config=config,
- params=params)
-
- train_spec = tf.estimator.TrainSpec(
- input_fn=train_input_fn,
- max_steps=FLAGS.train_steps)
-
- eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
- start_delay_secs=60,
- steps=FLAGS.eval_examples,
- throttle_secs=60)
-
- tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
-
-
-def main(argv=None): # pylint: disable=unused-argument
- if FLAGS.print_variable_names:
- print_variable_names()
- return
-
- if FLAGS.predict_all_test:
- predict_all_test()
- else:
- train_and_eval()
-
-
-if __name__ == '__main__':
- tf.app.run()
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/requirements.txt b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/requirements.txt
deleted file mode 100644
index 1f2a5e41d..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/requirements.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-absl-py==0.11.0
-astunparse==1.6.3
-cachetools==4.1.1
-certifi==2020.6.20
-chardet==3.0.4
-gast==0.3.3
-google-auth==1.23.0
-google-auth-oauthlib==0.4.2
-google-pasta==0.2.0
-grpcio==1.33.2
-h5py==2.10.0
-idna==2.10
-importlib-metadata==2.0.0
-Keras-Preprocessing==1.1.2
-Markdown==3.3.3
-numpy==1.18.5
-oauthlib==3.1.0
-opt-einsum==3.3.0
-protobuf==3.13.0
-pyasn1==0.4.8
-pyasn1-modules==0.2.8
-requests==2.24.0
-requests-oauthlib==1.3.0
-rsa==4.6
-scipy==1.5.3
-six==1.15.0
-tensorboard==2.3.0
-tensorboard-plugin-wit==1.7.0
-tensorflow==2.3.1
-tensorflow-estimator==2.3.0
-termcolor==1.1.0
-urllib3==1.25.11
-Werkzeug==1.0.1
-wrapt==1.12.1
-zipp==3.4.0
diff --git a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/utils_gpu.py b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/utils_gpu.py
deleted file mode 100644
index a08b4fa97..000000000
--- a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/utils_gpu.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-# coding=utf-8
-# Copyright 2021 The Google Research Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Utility functions."""
-import numpy as np
-from scipy.stats import special_ortho_group
-import tensorflow as tf
-
-
-def relative_angle(r1, r2):
- """Relative angle (radians) between 3D rotation matrices."""
- rel_rot = tf.matmul(tf.transpose(r1, perm=[0, 2, 1]), r2)
- trace = rel_rot[:, 0, 0] + rel_rot[:, 1, 1] + rel_rot[:, 2, 2]
- cos_theta = (trace - 1.0) / 2.0
- cos_theta = tf.minimum(cos_theta, tf.ones_like(cos_theta))
- cos_theta = tf.maximum(cos_theta, (-1.0) * tf.ones_like(cos_theta))
- theta = tf.acos(cos_theta)
- return theta
-
-
-def random_rotation_benchmark_np(n):
- """Sample a random 3D rotation by method used in Zhou et al, CVPR19.
-
- This numpy function is a copy of the PyTorch function
- get_sampled_rotation_matrices_by_axisAngle() in the code made available
- for Zhou et al, CVPR19, at https://github.com/papagina/RotationContinuity/.
-
- Args:
- n: the number of rotation matrices to return.
-
- Returns:
- [n, 3, 3] np array.
- """
- theta = np.random.uniform(-1, 1, n) * np.pi
- sin = np.sin(theta)
- axis = np.random.randn(n, 3)
- axis = axis / np.maximum(np.linalg.norm(axis, axis=-1, keepdims=True), 1e-7)
- qw = np.cos(theta)
- qx = axis[:, 0] * sin
- qy = axis[:, 1] * sin
- qz = axis[:, 2] * sin
-
- xx = qx*qx
- yy = qy*qy
- zz = qz*qz
- xy = qx*qy
- xz = qx*qz
- yz = qy*qz
- xw = qx*qw
- yw = qy*qw
- zw = qz*qw
-
- row0 = np.stack((1-2*yy-2*zz, 2*xy-2*zw, 2*xz+2*yw), axis=-1)
- row1 = np.stack((2*xy+2*zw, 1-2*xx-2*zz, 2*yz-2*xw), axis=-1)
- row2 = np.stack((2*xz-2*yw, 2*yz+2*xw, 1-2*xx-2*yy), axis=-1)
- matrix = np.stack((row0, row1, row2), axis=1)
-
- return matrix
-
-
-def random_rotation_benchmark(n):
- """A TF wrapper for random_rotation_benchmark_np()."""
- mat = tf.py_func(
- func=lambda t: np.float32(random_rotation_benchmark_np(t)),
- inp=[n],
- Tout=tf.float32,
- stateful=True)
- return tf.reshape(mat, (n, 3, 3))
-
-
-def random_rotation(n):
- """Sample rotations from a uniform distribution on SO(3)."""
- mat = tf.py_func(
- func=lambda t: np.float32(special_ortho_group.rvs(3, size=t)),
- inp=[n],
- Tout=tf.float32,
- stateful=True)
- return tf.reshape(mat, (n, 3, 3))
-
-
-def symmetric_orthogonalization(x):
- """Maps 9D input vectors onto SO(3) via symmetric orthogonalization."""
- # Inner dimensions of the input should be 3x3 matrices.
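- # With m = U S V^T, the nearest rotation in the Frobenius sense is
- # R = U diag(1, 1, det(U V^T)) V^T; the det factor flips the last column of
- # U when needed so that det(R) = +1.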
- m = tf.reshape(x, (-1, 3, 3))
- _, u, v = tf.svd(m)
- det = tf.linalg.det(tf.matmul(u, v, transpose_b=True))
- r = tf.matmul(
- tf.concat([u[:, :, :-1], u[:, :, -1:] * tf.reshape(det, [-1, 1, 1])], 2),
- v, transpose_b=True)
- return r
-
-
-def gs_orthogonalization(p6):
- """Gram-Schmidt orthogonalization from 6D input."""
- # Input should be [batch_size, 6]
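- # Normalize the first 3-vector as the first axis, use cross products to
- # complete a right-handed orthonormal frame, and stack the three axes as the
- # columns of the rotation matrix.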
- x = p6[:, 0:3]
- y = p6[:, 3:6]
- xn = tf.math.l2_normalize(x, axis=-1)
- z = tf.linalg.cross(xn, y)
- zn = tf.math.l2_normalize(z, axis=-1)
- y = tf.linalg.cross(zn, xn)
- r = tf.stack([xn, y, zn], -1)
- return r
diff --git "a/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/\350\256\272\346\226\207\347\262\276\345\272\246.jpg" "b/TensorFlow/contrib/cv/SVD_ID2019_for_Tensorflow/GPU/\350\256\272\346\226\207\347\262\276\345\272\246.jpg"
deleted file mode 100644
index 1f75f76fc00025f29df14a8da23bd8350a0526ed..0000000000000000000000000000000000000000
GIT binary patch
(base85-encoded JPEG data omitted)
zt;bM0=52WXhQGm{^s{2Gccw^x6ssfimxyMuL-t*}t)zRve5Q45SR`
zHc4ewC8RXv)q|s+fJ^JgHL3rUCS~Xt^{pYeoXqW_MJTy%d?UF*Bioo+eP8I_&G_
zyg=S`ST0o`rb4)g2a=Tod7D5+%tKjm#{$gs0fs1>5*1TTFAxJZ+<=~6g0RwH#OAg_
zQRpzLO`%GuywS}5zDSbu^*BvL+|7iX`CR)wwRnl!vD%M;5GXH-h*8=oNsGmziyW39
zF!%MAGRT-a=dp5hNiiwf=+KfQK8rbvS=pXHbZ+U}NGzh5e#kVT;|Td3{!O9o@Vyjf
ze*3M}w{Sl7RhcL??>FErWolwlob`=6eb~^{e&p;sszKC4{f@mF7+okBL!Gcy!BIAV
zizl0i{33<CG0CySWBR6l`2g%^3?
z&50IR(n>SxB&E2N9Xli?9Xc39s%!zhsCj>4dh#I2G%N=(5xARUGY_-0mUQYm#FZZaeHvGdpyX$563m~
z(OFVwvN$YZkl*(%`}6fv>$hy78IfPvx*LTP+g#64=V8JY?nCB#mS0R2E(%m3%-J0J
ziuAoTjBx;>ys?-oIE&h9_pd(ykj9XZoVmUMM@J6y-VWqv%A=%E6!wU>%h2thm5QGrP
ztj7xvFe;_w>}KLA+}c?1h1ZOFaVY+%?f
zy01%c>156rn%@#x&&lO2mZ)fA!+OCTM-Z`EpxvkQtv#aX&1BZDRDOr4Q}!rOouclh
zYtFY^V;lm
zuZ~md*-*)c220@XRxh44?Tq;UeXQ|uS;d3EZk1UZT)JAW=qgVt6qKC_L)^iN`abwL
zd@d(CM(%K>yU5qv1hqbP}$>Is-aLq<#9<3Emejn-t5#?FZQ
zE(4vE+UecW$;8C-)vKy)Ai{bZR8)!@BUGC6R0%E*NpAW**g1eTt7!YQh*&6+6Pqww
z@p`>Qe`Z85fs%?oYK)_=4{>f$k;IUGj*0n7U-Q7TGvWG
z0L;UI(LgE2w3ISfM)c$M>|_YnsFh1n3}~a~9pc-e2v`}DY$zKQ_C`?7d<_79tld#N
z#1(q7=aPx)T;z-ut9}J*%HUh|aeoE3l1St*EIvYDEN&>}sY_9rS1yF^k`0E0XdhE_
z)MMhykKDK)vNY*G{d4Ctz{mq8=m{Jlw>O2X*kSa3H5K#FpVHB+SH{DA>qVu7gjsXh
z@DwpokpzU%%@T+0*azIy+lZoj1r46C=vpF2c=uNf0s
z3K_SuLdkiSWvyZ}-cObvq2F_CjqcSS<8&L*6;pVU=V)aIA$Tsbe}S33JEbA3$!_X>
zw~mbxANl}Lnpu=m6?S@|`c0@#LE6Zn5mnh-&>4%3!^H?2{FaM~3NEb(iy1zu#fs%l
zt`}&^O=`m{!pPz1xwB4{9q!<@amW);V$BNoD8uZGOshD4vayn
zd|Zld#E%Pn5xtMTH&z0F3oan%&?s83@pdiJhcqHXJLne{sYTh7HFekV4olns6k6
zI41>&X~@b@n%}Zv9*HUQs??1b+H`NGRP>5dN^7?Y%_it98`oHnt>IJ^p^_1t@=HU~
zwZ`_V(K2IDm;Ltfd8QPcI>Z;P#UP&|OW!CtDy3TM`2nGiD{XO+l~uKZ#O-)WBpr*f
zs6g)^#k%6F{H>ofaXpD$Qlr=}PKIqmad}{H{uy#f6$q>?pj$Ov7d}Xq3~^H`NwkBN
z*L7?Ylup8gLG-SsPgik{5SDzVaZ>VFvR_aEQ?~SUxt|tBqwRPkZo7`@)_sLgX%@v(
zJtaa5DQ;97!XDP2vEj9BNVh17DbtvTWB8)Yxi*eQF15ff>&T}`3phxDXfH%;v|Mk>
z3ulN}r$sGCy{mCt&QC~XZT)prbFC0q-diyHYI0a>^%tDWFU_i4t>e`H5aWwz5{6-?
z_C4HGlMN+p2npNNZMHZ>PtbsnjoLs8tbDsUk6N@rk~DQCCiMV?^{YCyb|M9fUj@jH
z$x6&@!AJf3=wbJmT7wnmziO!%(F9rY*&R))RPHS@nDzQEtR5E5c(a5f*U^JD&i2&
z<1Bno^FDfD(~aF)c~owLrs4t)EonSza&p!NIFGB?p>VCYDZClkMMWMMMdr
zZktFkE8$L5FGulnSV%_PYYkmplI463-bjT108)oJLOr0;z>&;26vQ5
zjY=Ce}E6*+J+r_3^{Z5Ep?c+b!!F+A*nDGzmUjfWes92SS
zF-o$o^b#+~kelgCMHfsUk25->&itr|@eg5@tTWYD1w)qtmM-RNXX@&F$xypJ)hVV0
zfUSrGLV|$;B~paT9BnLqLK2qcyj#8E_lMqgI$;IRzb=0cy3)%N$MQlFsm&hjC~{IqaguLT-darKYyG=?z`}^|pPYt>q+>
zJ2_t!WAGoHk$wpzBEXa6oT%z5k6#Y2>QJV|s?k$-x|xc5<`Vvb_KR@81Tf+3U3BN|
zzkf3T$}qOuQc-qQcbz@!8MPzSwIbUFW&I?gM>2IG?8Z)&7Ih)|4giQ42@=3j=+V7L
z!y>azZ7jlHO^CZYId+O@XJV*EMn*L!;NukoF#>j?d8FF$<80;ZKGDeRIu9ZvnHN1W
zg{=_Xe0Y}5cFajUAVW20jI?(^p)+&x)dnpFQUL$chnCaHTSq#KV2yQ{7H|&hCLMeL
zuoDpwekR3nNQc#S^YfUUKEC1z+V88Uw&}TgGbp`KO
zZ^ZcPjUJ_5!4_Tc805<91Z)&tP!u_HnTxJ5(PaX51sYkp{bH@sg^J9XiS=DMoY9)F
z!w*vS#u2Ql_6s)jhD=OKmXokz%NC#wPBV#>DGXNmZa{w^+YW*<*7bBNvIN(PA`3hW
z=j5&xDy*SWwTAq<9EG=lO6t;c&%h%t+4xpi3Deljvz*@oT=+FApaas#6O&PX4Mh{Xm>
zlx@-vuil&LK7f-y8`5Ma4A%uU4by}_z*UT%gnWhOxw;ISK(%p+iva_y{18^V4&fIT
zuD~ZZGZOSqHTlH7(jd6htm=^iUd0d6Cz#yOWIppGO($Q{bKHkEJY2FIx1f|eVpS<>|6xX(k&S0;y
zoMHBJsAtX%$7bm#gf|Ig@Bn+RKnS*0Hd5ncd9|F0<>m^T(^;6SgnioAt*E-)moj>S
zZ8e#o*e=gkRWZeug~7$h!WkEoS*}R$C8S6KIJcu@EodN$C^}F{0#3yB-UusL|JkD0
zLebc!3q2FfDzmCANVS_Z5SQy@L3G*f7PClSDO-|Sq&JXnq!-!jf(4DxM>
z98cad@G8(2=cJmV{<^u)uN=d9DiI+jXhZH%`M>%&>!3KEe%&vyz#L#vaCf)h5%zQFXr9=M6eI>gwAeJfwN76u
zW1^fF<1!5Sfv++C+(2>v_;TfK4n8ZXw+&5=QVeyOR~bvMy$55!WPx#SbyYgAyoIo6
z8l_b<<~3aL7~9gbeYRV+rcy93Sxtn7es(*2D;SKz3D&PElDIcd^Kvnm@C&T473j>(
zGN;1G(P_*L`6B${y?%p2)T3}Aq$}>z>^u6-{s~>AvDQn)ZOM$2TI5SQ5avQ83FvXzIDVnZWK
z!5g*SzW`5egD#o5(dAGAEU{LPg?$sMGF~oHK7+^dzB@sToa#0zca2GJb%xU0QWknS
zI?pX5OXTxuUA)+-dSRj76=+M4rKz)(%z
zkk4p3$%C#~3H3$5-sbBE9rs@tY#ibvMBjsEhXF5ApV{^hp#9K3t04{%RpC)m|NP=r
z