diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/LICENSE b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/LICENSE
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/CFL_StdConvs.py b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/CFL_StdConvs.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2e5c53a0c3a9d76ff91e860a54d8e7fd7a3fc16
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/CFL_StdConvs.py
@@ -0,0 +1,260 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+from .network import Network
+import tensorflow as tf
+
+
+class LayoutEstimator_StdConvs(Network):
+ def setup(self):
+ feed_dict_test = {}
+ feed_dict_train = {}
+ self.nname = "edge-estimator"
+ with tf.variable_scope(self.nname):
+ (self.feed('rgb_input')
+ .conv(7, 7, 64, 2, 2, relu=False, name='conv1')
+ .batch_normalization(relu=True, name='bn_conv1')
+ .max_pool(3, 3, 2, 2, name='pool1')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
+ .batch_normalization(name='bn2a_branch1'))
+
+ (self.feed('pool1')
+ .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
+ .batch_normalization(relu=True, name='bn2a_branch2a')
+ .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
+ .batch_normalization(relu=True, name='bn2a_branch2b')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
+ .batch_normalization(name='bn2a_branch2c'))
+
+ (self.feed('bn2a_branch1',
+ 'bn2a_branch2c')
+ .add(name='res2a')
+ .relu(name='res2a_relu')
+ .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
+ .batch_normalization(relu=True, name='bn2b_branch2a')
+ .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
+ .batch_normalization(relu=True, name='bn2b_branch2b')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
+ .batch_normalization(name='bn2b_branch2c'))
+
+ (self.feed('res2a_relu',
+ 'bn2b_branch2c')
+ .add(name='res2b')
+ .relu(name='res2b_relu')
+ .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
+ .batch_normalization(relu=True, name='bn2c_branch2a')
+ .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
+ .batch_normalization(relu=True, name='bn2c_branch2b')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
+ .batch_normalization(name='bn2c_branch2c'))
+
+ (self.feed('res2b_relu',
+ 'bn2c_branch2c')
+ .add(name='res2c')
+ .relu(name='res2c_relu')
+ .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1')
+ .batch_normalization(name='bn3a_branch1'))
+
+ (self.feed('res2c_relu')
+ .conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a')
+ .batch_normalization(relu=True, name='bn3a_branch2a')
+ .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
+ .batch_normalization(relu=True, name='bn3a_branch2b')
+ .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
+ .batch_normalization(name='bn3a_branch2c'))
+
+ (self.feed('bn3a_branch1',
+ 'bn3a_branch2c')
+ .add(name='res3a')
+ .relu(name='res3a_relu')
+ .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')
+ .batch_normalization(relu=True, name='bn3b_branch2a')
+ .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')
+ .batch_normalization(relu=True, name='bn3b_branch2b')
+ .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')
+ .batch_normalization(name='bn3b_branch2c'))
+
+ (self.feed('res3a_relu',
+ 'bn3b_branch2c')
+ .add(name='res3b')
+ .relu(name='res3b_relu')
+ .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')
+ .batch_normalization(relu=True, name='bn3c_branch2a')
+ .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b')
+ .batch_normalization(relu=True, name='bn3c_branch2b')
+ .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')
+ .batch_normalization(name='bn3c_branch2c'))
+
+ (self.feed('res3b_relu',
+ 'bn3c_branch2c')
+ .add(name='res3c')
+ .relu(name='res3c_relu')
+ .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')
+ .batch_normalization(relu=True, name='bn3d_branch2a')
+ .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')
+ .batch_normalization(relu=True, name='bn3d_branch2b')
+ .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')
+ .batch_normalization(name='bn3d_branch2c'))
+
+ (self.feed('res3c_relu',
+ 'bn3d_branch2c')
+ .add(name='res3d')
+ .relu(name='res3d_relu')
+ .conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1')
+ .batch_normalization(name='bn4a_branch1'))
+
+ (self.feed('res3d_relu')
+ .conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a')
+ .batch_normalization(relu=True, name='bn4a_branch2a')
+ .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
+ .batch_normalization(relu=True, name='bn4a_branch2b')
+ .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
+ .batch_normalization(name='bn4a_branch2c'))
+
+ (self.feed('bn4a_branch1',
+ 'bn4a_branch2c')
+ .add(name='res4a')
+ .relu(name='res4a_relu')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')
+ .batch_normalization(relu=True, name='bn4b_branch2a')
+ .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')
+ .batch_normalization(relu=True, name='bn4b_branch2b')
+ .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')
+ .batch_normalization(name='bn4b_branch2c'))
+
+ (self.feed('res4a_relu',
+ 'bn4b_branch2c')
+ .add(name='res4b')
+ .relu(name='res4b_relu')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')
+ .batch_normalization(relu=True, name='bn4c_branch2a')
+ .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')
+ .batch_normalization(relu=True, name='bn4c_branch2b')
+ .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')
+ .batch_normalization(name='bn4c_branch2c'))
+
+ (self.feed('res4b_relu',
+ 'bn4c_branch2c')
+ .add(name='res4c')
+ .relu(name='res4c_relu')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')
+ .batch_normalization(relu=True, name='bn4d_branch2a')
+ .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')
+ .batch_normalization(relu=True, name='bn4d_branch2b')
+ .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')
+ .batch_normalization(name='bn4d_branch2c'))
+
+ (self.feed('res4c_relu',
+ 'bn4d_branch2c')
+ .add(name='res4d')
+ .relu(name='res4d_relu')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')
+ .batch_normalization(relu=True, name='bn4e_branch2a')
+ .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')
+ .batch_normalization(relu=True, name='bn4e_branch2b')
+ .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')
+ .batch_normalization(name='bn4e_branch2c'))
+
+ (self.feed('res4d_relu',
+ 'bn4e_branch2c')
+ .add(name='res4e')
+ .relu(name='res4e_relu')
+ .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')
+ .batch_normalization(relu=True, name='bn4f_branch2a')
+ .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')
+ .batch_normalization(relu=True, name='bn4f_branch2b')
+ .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')
+ .batch_normalization(name='bn4f_branch2c'))
+
+ (self.feed('res4e_relu',
+ 'bn4f_branch2c')
+ .add(name='res4f')
+ .relu(name='res4f_relu')
+ .conv(1, 1, 2048, 2, 2, biased=False, relu=False, name='res5a_branch1')
+ .batch_normalization(name='bn5a_branch1'))
+
+ (self.feed('res4f_relu')
+ .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a')
+ .batch_normalization(relu=True, name='bn5a_branch2a')
+ .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b')
+ .batch_normalization(relu=True, name='bn5a_branch2b')
+ .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')
+ .batch_normalization(name='bn5a_branch2c'))
+
+ (self.feed('bn5a_branch1',
+ 'bn5a_branch2c')
+ .add(name='res5a')
+ .relu(name='res5a_relu')
+ .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')
+ .batch_normalization(relu=True, name='bn5b_branch2a')
+ .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b')
+ .batch_normalization(relu=True, name='bn5b_branch2b')
+ .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')
+ .batch_normalization(name='bn5b_branch2c'))
+
+ drop_out_d = tf.placeholder(tf.float32, name="drop_out_d")
+            feed_dict_train[drop_out_d] = 0.5  # dropout keep probability during training
+            feed_dict_test[drop_out_d] = 1.0  # keep probability 1.0 disables dropout at test time
+
+ (self.feed('res5a_relu',
+ 'bn5b_branch2c')
+ .add(name='res5b')
+ .relu(name='res5b_relu')
+ .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')
+             .batch_normalization(relu=True, name='bn5c_branch2a', dropout=drop_out_d)
+ .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b')
+ .batch_normalization(relu=True, name='bn5c_branch2b', dropout=drop_out_d)
+ .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')
+ .batch_normalization(name='bn5c_branch2c'))
+
+ # ------------------------------------------------------------------------------------
+ # decoder EDGE MAPS & CORNERS MAPS
+ (self.feed('bn5c_branch2c')
+ .upconv(None, 512, ksize=5, stride=2, name='d_2x', biased=True, relu=True))
+ (self.feed('d_2x', 'res4f_relu')
+ .concat(axis=3, name="d_concat_2x")
+ .upconv(None, 256, ksize=5, stride=2, name='d_4x', biased=True, relu=True)
+ .upconv(None, 2, ksize=3, stride=1, biased=True, relu=False, name='output4X_likelihood'))
+ (self.feed('d_4x', 'res3d_relu', 'output4X_likelihood')
+ .concat(axis=3, name="d_concat_4x")
+ .upconv(None, 128, ksize=5, stride=2, biased=True, relu=True, name='d_8x')
+ .upconv(None, 2, ksize=3, stride=1, relu=False, biased=True, name='output8X_likelihood'))
+ (self.feed('d_8x', 'res2c_relu', 'output8X_likelihood')
+ .concat(axis=3, name="d_concat_8x")
+ .upconv(None, 64, ksize=5, stride=2, biased=True, relu=True, name='d_16x')
+ .upconv(None, 2, ksize=3, stride=1, relu=False, biased=True, name='output16X_likelihood'))
+ (self.feed('d_16x', 'bn_conv1', 'output16X_likelihood')
+ .concat(axis=3, name="d_concat_16x")
+ .upconv(None, 64, ksize=3, stride=1, biased=True, relu=True, name='d_16x_conv1')
+ .upconv(None, 2, ksize=3, stride=1, biased=True, relu=False, name='output_likelihood'))
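+            # Each 'output*_likelihood' head above is a 2-channel map (edge and corner
+            # likelihoods, per the decoder comment); the coarser 4X/8X/16X predictions
+            # are concatenated back into the decoder so later stages can refine them.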
+
+ self.fd_test = feed_dict_test
+ self.fd_train = feed_dict_train
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/__init__.py b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d90e902da62c77a09c1b6e58d8e71155b65bc74d
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+from .CFL_StdConvs import LayoutEstimator_StdConvs
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/network.py b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/network.py
new file mode 100644
index 0000000000000000000000000000000000000000..945d5a4de6a2bd4c40292406521b8e234c18ea65
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/Models/network.py
@@ -0,0 +1,726 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from npu_bridge.npu_init import *
+import numpy as np
+import tensorflow as tf
+import re
+import math
+from config import *
+from npu_bridge.estimator import npu_ops
+
+
+DEFAULT_PADDING = 'SAME'
+DEFAULT_TYPE = tf.float32
+
+
+def include_original(dec):
+    """ Meta decorator which makes the original function callable (via f._original()) """
+
+ def meta_decorator(f):
+ decorated = dec(f)
+ decorated._original = f
+ return decorated
+
+ return meta_decorator
+
+
+summary = True
+
+
+def ActivationSummary(layer): # tensorBoard (jmfacil)
+ if summary:
+ TOWER_NAME = 'tower'
+ tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', layer.op.name)
+ tf.summary.histogram(tensor_name + '/activations', layer)
+
+
+@include_original
+def layer(op):
+ def layer_decorated(self, *args, **kwargs):
+ # Automatically set a name if not provided.
+ name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
+ # Figure out the layer inputs.
+ if len(self.inputs) == 0:
+ raise RuntimeError('No input variables found for layer %s.' % name)
+ elif len(self.inputs) == 1:
+ layer_input = self.inputs[0]
+ else:
+ layer_input = list(self.inputs)
+ # Perform the operation and get the output.
+ layer_output = op(self, layer_input, *args, **kwargs)
+ # Add to layer LUT.
+ self.layers[name] = layer_output
+ # This output is now the input for the next layer.
+ self.feed(layer_output)
+ # Return self for chained calls.
+ return self
+
+ return layer_decorated
+
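+# Illustrative note (not in the original source): each @layer-decorated op consumes
+# self.inputs, registers its output under `name` in self.layers, and re-feeds it,
+# which is what enables the chained definitions in CFL_StdConvs.py, e.g.:
+#     self.feed('rgb_input').conv(7, 7, 64, 2, 2, name='conv1').max_pool(3, 3, 2, 2, name='pool1')
+# with every intermediate tensor still retrievable via self.get_output('conv1').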
+
+class Network(object):
+
+ def __init__(self, inputs, trainable=True, is_training=True, bs=16): # ,reuse=None): #cfernandez
+ self.inputs = []
+ self.batch_size = bs
+ self.layers = dict(inputs)
+ self.trainable = trainable
+ self.is_training = is_training
+ self.setup()
+
+ def setup(self):
+ raise NotImplementedError('Must be subclassed.')
+
+ def load(self, data_path, session, ignore_missing=False):
+ def transform_names(k):
+ if k == 'mean':
+ return 'moving_mean'
+ if k == 'variance':
+ return 'moving_variance'
+ if k == 'scale':
+ return 'gamma'
+ if k == 'offset':
+ return 'beta'
+ return k
+
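+        # Assumed checkpoint layout (inferred from this loop, not documented here):
+        # a pickled dict of dicts such as {'conv1': {'weights': W, 'biases': b},
+        # 'bn_conv1': {'mean': m, 'variance': v, 'scale': s, 'offset': o}}, with
+        # Caffe-style batch-norm keys renamed to TF names by transform_names().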
+ print(data_path)
+        data_dict = np.load(data_path, encoding='latin1', allow_pickle=True).item()
+ for key in data_dict:
+ superkey = self.nname + "/" + key
+ with tf.variable_scope(superkey, reuse=True):
+ for subkey in data_dict[key]:
+ try:
+ nsubkey = transform_names(subkey)
+ var = tf.get_variable(nsubkey)
+ session.run(var.assign(data_dict[key][subkey]))
+ except ValueError:
+ print("ignore " + key, subkey)
+ print(superkey, tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=superkey))
+ if not ignore_missing:
+ raise
+        print("Loaded weights")
+
+ def feed(self, *args):
+ assert len(args) != 0
+ self.inputs = []
+ for layer in args:
+ if isinstance(layer, str):
+ try:
+ layer = self.layers[layer]
+ print(layer)
+ except KeyError:
+ print(list(self.layers.keys()))
+ raise KeyError('Unknown layer name fed: %s' % layer)
+ self.inputs.append(layer)
+ return self
+
+ def get_output(self, layer):
+ try:
+ layer = self.layers[layer]
+ except KeyError:
+ print(list(self.layers.keys()))
+ raise KeyError('Unknown layer name fed: %s' % layer)
+ return layer
+
+ def get_layer_output(self, name):
+ return self.layers[name]
+
+ def get_unique_name(self, prefix):
+        ident = sum(t.startswith(prefix) for t, _ in list(self.layers.items())) + 1
+        return '%s_%d' % (prefix, ident)
+
+ def make_var(self, name, shape, initializer=None, trainable=True, regularizer=None):
+ return tf.get_variable(name, shape, initializer=initializer, trainable=trainable, regularizer=regularizer)
+
+ def validate_padding(self, padding):
+ assert padding in ('SAME', 'VALID')
+
+ def filler(self, params): # chema
+ # print "Filler: "+str(params)
+ value = params.get("value", 0.0)
+ mean = params.get("mean", 0.0)
+ std = params.get("std", 0.1)
+ dtype = params.get("dtype", DEFAULT_TYPE)
+ name = params.get("name", None)
+ uniform = params.get("uniform", False)
+ return {
+ "xavier_conv2d": tf.contrib.layers.xavier_initializer_conv2d(uniform=uniform),
+ "t_normal": tf.truncated_normal_initializer(mean=mean, stddev=std, dtype=dtype),
+ "constant": tf.constant_initializer(value=value, dtype=dtype)
+ }[params.get("type", "t_normal")]
+
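+    # filler() apparently mirrors Caffe-style "filler" specs: e.g. the conv layer
+    # below requests self.filler({"type": "t_normal", "mean": 0.0, "std": 0.1}) to
+    # obtain a truncated-normal kernel initializer.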
+ @layer
+ def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, rate=1, biased=True, relu=True, padding=DEFAULT_PADDING,
+ trainable=True, initializer=None):
+ """ contribution by miraclebiu, and biased option"""
+ self.validate_padding(padding)
+ c_i = input.get_shape()[-1]
+ convolve = lambda i, k: tf.nn.convolution(
+ i, k, padding=padding, strides=[s_h, s_w], dilation_rate=[rate, rate])
+ with tf.variable_scope(name, reuse=False) as scope: # cfernandez reuse
+
+ # init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
+            init_weights = tf.zeros_initializer() if initializer == 'zeros' else tf.contrib.layers.variance_scaling_initializer(
+ factor=0.01, mode='FAN_AVG', uniform=False)
+ init_biases = tf.constant_initializer(0.0)
+ # kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable,
+ # regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
+ kernel = self.make_var('weights', shape=[k_h, k_w, c_i // 1, c_o],
+ initializer=self.filler({"type": "t_normal", # cfernandez
+ "mean": 0.0,
+ "std": 0.1
+ }),
+ regularizer=self.l2_regularizer(args.weight_decay)) # 0.0005 cfg.TRAIN.WEIGHT_DECAY
+
+ if biased:
+ biases = self.make_var('biases', [c_o], init_biases, trainable)
+ conv = convolve(input, kernel)
+            if relu:
+                bias = tf.nn.bias_add(conv, biases)
+                output = tf.nn.relu(bias)
+            else:
+                output = tf.nn.bias_add(conv, biases)
+
+ else:
+ conv = convolve(input, kernel)
+            if relu:
+                output = tf.nn.relu(conv)
+            else:
+                output = conv
+
+ return output
+
+ @staticmethod
+ def rotation_matrix(axis, theta):
+ """
+ Return the rotation matrix associated with counterclockwise rotation about
+ the given axis by theta radians.
+ """
+ axis = np.asarray(axis)
+ axis = axis / math.sqrt(np.dot(axis, axis))
+ a = math.cos(theta / 2.0)
+ b, c, d = -axis * math.sin(theta / 2.0)
+ aa, bb, cc, dd = a * a, b * b, c * c, d * d
+ bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
+ return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
+ [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
+ [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
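+    # Sanity check (illustrative, not from the original source): a counterclockwise
+    # 90-degree rotation about the z-axis maps the x-axis onto the y-axis:
+    #   np.matmul(Network.rotation_matrix((0, 0, 1), np.pi / 2), [1, 0, 0])
+    #   # -> approximately [0., 1., 0.]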
+
+ @staticmethod
+ def equi_coord(pano_W, pano_H, k_W, k_H, u, v):
+ """ contribution by cfernandez and jmfacil """
+ fov_w = k_W * np.deg2rad(360. / float(pano_W))
+ focal = (float(k_W) / 2) / np.tan(fov_w / 2)
+ c_x = 0
+ c_y = 0
+
+ u_r, v_r = u, v
+ u_r, v_r = u_r - float(pano_W) / 2., v_r - float(pano_H) / 2.
+ phi, theta = u_r / (pano_W) * (np.pi) * 2, -v_r / (pano_H) * (np.pi)
+
+ ROT = Network.rotation_matrix((0, 1, 0), phi)
+ ROT = np.matmul(ROT, Network.rotation_matrix((1, 0, 0), theta)) # np.eye(3)
+
+ h_range = np.array(range(k_H))
+ w_range = np.array(range(k_W))
+ w_ones = (np.ones(k_W))
+ h_ones = (np.ones(k_H))
+ h_grid = np.matmul(np.expand_dims(h_range, -1), np.expand_dims(w_ones, 0)) + 0.5 - float(k_H) / 2
+ w_grid = np.matmul(np.expand_dims(h_ones, -1), np.expand_dims(w_range, 0)) + 0.5 - float(k_W) / 2
+
+ K = np.array([[focal, 0, c_x], [0, focal, c_y], [0., 0., 1.]])
+ inv_K = np.linalg.inv(K)
+ rays = np.stack([w_grid, h_grid, np.ones(h_grid.shape)], 0)
+ rays = np.matmul(inv_K, rays.reshape(3, k_H * k_W))
+ rays /= np.linalg.norm(rays, axis=0, keepdims=True)
+ rays = np.matmul(ROT, rays)
+ rays = rays.reshape(3, k_H, k_W)
+
+ phi = np.arctan2(rays[0, ...], rays[2, ...])
+ theta = np.arcsin(np.clip(rays[1, ...], -1, 1))
+ x = (pano_W) / (2. * np.pi) * phi + float(pano_W) / 2.
+ y = (pano_H) / (np.pi) * theta + float(pano_H) / 2.
+
+ roi_y = h_grid + v_r + float(pano_H) / 2.
+ roi_x = w_grid + u_r + float(pano_W) / 2.
+
+ new_roi_y = (y)
+ new_roi_x = (x)
+
+ offsets_x = (new_roi_x - roi_x)
+ offsets_y = (new_roi_y - roi_y)
+
+ return offsets_x, offsets_y
+
+ @staticmethod
+    def equi_coord_fixed_resolution(pano_W, pano_H, k_W, k_H, u, v, pano_Hf=-1, pano_Wf=-1):
+ """ contribution by cfernandez and jmfacil """
+ pano_Hf = pano_H if pano_Hf <= 0 else pano_H / pano_Hf
+ pano_Wf = pano_W if pano_Wf <= 0 else pano_W / pano_Wf
+ fov_w = k_W * np.deg2rad(360. / float(pano_Wf))
+ focal = (float(k_W) / 2) / np.tan(fov_w / 2)
+ c_x = 0
+ c_y = 0
+
+ u_r, v_r = u, v
+ u_r, v_r = u_r - float(pano_W) / 2., v_r - float(pano_H) / 2.
+ phi, theta = u_r / (pano_W) * (np.pi) * 2, -v_r / (pano_H) * (np.pi)
+
+ ROT = Network.rotation_matrix((0, 1, 0), phi)
+ ROT = np.matmul(ROT, Network.rotation_matrix((1, 0, 0), theta)) # np.eye(3)
+
+ h_range = np.array(range(k_H))
+ w_range = np.array(range(k_W))
+ w_ones = (np.ones(k_W))
+ h_ones = (np.ones(k_H))
+ h_grid = np.matmul(np.expand_dims(h_range, -1), np.expand_dims(w_ones, 0)) + 0.5 - float(k_H) / 2
+ w_grid = np.matmul(np.expand_dims(h_ones, -1), np.expand_dims(w_range, 0)) + 0.5 - float(k_W) / 2
+
+ K = np.array([[focal, 0, c_x], [0, focal, c_y], [0., 0., 1.]])
+ inv_K = np.linalg.inv(K)
+ rays = np.stack([w_grid, h_grid, np.ones(h_grid.shape)], 0)
+ rays = np.matmul(inv_K, rays.reshape(3, k_H * k_W))
+ rays /= np.linalg.norm(rays, axis=0, keepdims=True)
+ rays = np.matmul(ROT, rays)
+ rays = rays.reshape(3, k_H, k_W)
+
+ phi = np.arctan2(rays[0, ...], rays[2, ...])
+ theta = np.arcsin(np.clip(rays[1, ...], -1, 1))
+ x = (pano_W) / (2. * np.pi) * phi + float(pano_W) / 2.
+ y = (pano_H) / (np.pi) * theta + float(pano_H) / 2.
+
+ roi_y = h_grid + v_r + float(pano_H) / 2.
+ roi_x = w_grid + u_r + float(pano_W) / 2.
+
+ new_roi_y = (y)
+ new_roi_x = (x)
+
+ offsets_x = (new_roi_x - roi_x)
+ offsets_y = (new_roi_y - roi_y)
+
+ return offsets_x, offsets_y
+
+ @staticmethod
+ def distortion_aware_map(pano_W, pano_H, k_W, k_H, s_width=1, s_height=1, bs=16):
+ """ contribution by cfernandez and jmfacil """
+ n = 1
+ offset = np.zeros(shape=[pano_H, pano_W, k_H * k_W * 2])
+ print(offset.shape)
+
+ for v in range(0, pano_H, s_height):
+ for u in range(0, pano_W, s_width):
+                offsets_x, offsets_y = Network.equi_coord_fixed_resolution(pano_W, pano_H, k_W, k_H, u, v, 1, 1)
+ offsets = np.concatenate((np.expand_dims(offsets_y, -1), np.expand_dims(offsets_x, -1)), axis=-1)
+ total_offsets = offsets.flatten().astype("float32")
+ offset[v, u, :] = total_offsets
+
+ offset = tf.constant(offset)
+ offset = tf.expand_dims(offset, 0)
+ offset = tf.concat([offset for _ in range(bs)], axis=0)
+ offset = tf.cast(offset, tf.float32)
+
+ return offset
+
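+    # Illustrative usage: for a 256x128 panorama and a 3x3 kernel,
+    # distortion_aware_map(256, 128, 3, 3, bs=16) returns a
+    # [16, 128, 256, 18] tensor of per-pixel (y, x) sampling offsets,
+    # which equi_conv below feeds to the deformable convolution op.
+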
+ @layer
+ def equi_conv(self, input, k_h, k_w, c_o, s_h, s_w, num_deform_group, name, num_groups=1, rate=1, biased=True,
+ relu=True,
+ padding=DEFAULT_PADDING, trainable=True, initializer=None):
+ """ contribution by cfernandez and jmfacil """
+ self.validate_padding(padding)
+ data = input
+ n, h, w, _ = tuple(data.get_shape().as_list())
+ data_shape = data.shape
+ offset = tf.stop_gradient(
+ Network.distortion_aware_map(w, h, k_w, k_h, s_width=s_w, s_height=s_h, bs=self.batch_size))
+
+ c_i = data.get_shape()[-1]
+ trans2NCHW = lambda x: tf.transpose(x, [0, 3, 1, 2])
+ trans2NHWC = lambda x: tf.transpose(x, [0, 2, 3, 1])
+ # deform conv only supports NCHW
+ data = trans2NCHW(data)
+ offset = trans2NCHW(offset)
+ dconvolve = lambda i, k, o: deform_conv_op.deform_conv_op(
+ i, k, o, strides=[1, 1, s_h, s_w], rates=[1, 1, rate, rate], padding=padding, num_groups=num_groups,
+ deformable_group=num_deform_group)
+ with tf.variable_scope(name, reuse=False) as scope:
+
+            init_weights = tf.zeros_initializer() if initializer == 'zeros' else tf.contrib.layers.variance_scaling_initializer(
+                factor=0.01, mode='FAN_AVG', uniform=False)
+ init_biases = tf.constant_initializer(0.0)
+ kernel = self.make_var('weights', [k_h, k_w, c_i, c_o], init_weights, trainable,
+ regularizer=self.l2_regularizer(args.weight_decay))
+ kernel = tf.transpose(kernel, [3, 2, 0, 1])
+ ActivationSummary(offset)
+
+ print(data, kernel, offset)
+ dconv = trans2NHWC(dconvolve(data, kernel, offset))
+ if biased:
+ biases = self.make_var('biases', [c_o], init_biases, trainable)
+ if relu:
+ bias = tf.nn.bias_add(dconv, biases)
+ return tf.nn.relu(bias)
+ return tf.nn.bias_add(dconv, biases)
+ else:
+ if relu:
+ return tf.nn.relu(dconv)
+ return dconv
+
+ @layer
+ def upconv(self, input, shape, c_o, ksize=4, stride=2, name='upconv', biased=False, relu=True,
+ padding=DEFAULT_PADDING,
+ trainable=True, initializer=None):
+ """ up-conv"""
+ self.validate_padding(padding)
+
+ c_in = input.get_shape()[3].value
+ in_shape_d = tf.shape(input)
+ in_shape = input.shape.as_list()
+ if shape is None:
+ h = ((in_shape[1]) * stride)
+ w = ((in_shape[2]) * stride)
+ new_shape = [in_shape_d[0], h, w, c_o]
+ else:
+ new_shape = [in_shape_d[0], shape[1], shape[2], c_o]
+ output_shape = tf.stack(new_shape)
+
+ filter_shape = [ksize, ksize, c_o, c_in]
+
+ with tf.variable_scope(name, reuse=False) as scope:
+            init_weights = tf.zeros_initializer() if initializer == 'zeros' else tf.contrib.layers.variance_scaling_initializer(
+                factor=0.01, mode='FAN_AVG', uniform=False)  # cfernandez
+ filters = self.make_var('weights', filter_shape, init_weights, trainable,
+ regularizer=self.l2_regularizer(args.weight_decay)) # cfg.TRAIN.WEIGHT_DECAY
+            deconv = tf.nn.conv2d_transpose(input, filters, output_shape,
+                                            strides=[1, stride, stride, 1], padding=padding, name=scope.name)
+            # conv2d_transpose loses static shape info, so reshape to restore it
+ deconv = tf.reshape(deconv, new_shape)
+
+            if biased:
+                init_biases = tf.constant_initializer(0.0)
+                biases = self.make_var('biases', [c_o], init_biases, trainable)
+                if relu:
+                    bias = tf.nn.bias_add(deconv, biases)
+                    output = tf.nn.relu(bias)
+                else:
+                    output = tf.nn.bias_add(deconv, biases)
+
+            else:
+                if relu:
+                    output = tf.nn.relu(deconv)
+                else:
+                    output = deconv
+        return output
+
+ @layer
+ def reduce_max(self, input_data, name):
+ return tf.reduce_max(input_data, axis=1, keep_dims=True)
+
+ @layer
+ def reduce_mean(self, input_data, name):
+ return tf.reduce_mean(input_data, axis=1, keep_dims=True)
+
+ @layer
+ def argmax(self, input_data, name):
+ return tf.argmax(input_data, axis=1)
+
+ @layer
+ def bilinear_unpool(self, input_data, mul_factor, name):
+ _, h, w, _ = tuple(input_data.get_shape().as_list())
+ return tf.image.resize_bilinear(input_data, (h * mul_factor, w * mul_factor), align_corners=True, name=name)
+
+ @layer
+ def mul_grad(self, input_data, mul, name):
+ return (1.0 - mul) * tf.stop_gradient(input_data) + (mul) * input_data
+
+ @layer
+ def relu(self, input, name):
+ return tf.nn.relu(input, name=name)
+
+ @layer
+ def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
+ self.validate_padding(padding)
+ return tf.nn.max_pool(input,
+ ksize=[1, k_h, k_w, 1],
+ strides=[1, s_h, s_w, 1],
+ padding=padding,
+ name=name)
+
+ @layer
+ def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
+ self.validate_padding(padding)
+ return tf.nn.avg_pool(input,
+ ksize=[1, k_h, k_w, 1],
+ strides=[1, s_h, s_w, 1],
+ padding=padding,
+ name=name)
+
+ @layer
+ def roi_pool(self, input, pooled_height, pooled_width, spatial_scale, name):
+ # only use the first input
+ if isinstance(input[0], tuple):
+ input[0] = input[0][0]
+
+ if isinstance(input[1], tuple):
+ input[1] = input[1][0]
+
+ print(input)
+ return roi_pool_op.roi_pool(input[0], input[1],
+ pooled_height,
+ pooled_width,
+ spatial_scale,
+ name=name)[0]
+
+ @layer
+ def psroi_pool(self, input, output_dim, group_size, spatial_scale, name):
+ """contribution by miraclebiu"""
+ # only use the first input
+ if isinstance(input[0], tuple):
+ input[0] = input[0][0]
+
+ if isinstance(input[1], tuple):
+ input[1] = input[1][0]
+
+ return psroi_pooling_op.psroi_pool(input[0], input[1],
+ output_dim=output_dim,
+ group_size=group_size,
+ spatial_scale=spatial_scale,
+ name=name)[0]
+
+ @layer
+ def reshape_layer(self, input, d, name):
+ input_shape = tf.shape(input)
+ if name == 'rpn_cls_prob_reshape':
+ #
+ # transpose: (1, AxH, W, 2) -> (1, 2, AxH, W)
+ # reshape: (1, 2xA, H, W)
+ # transpose: -> (1, H, W, 2xA)
+ return tf.transpose(tf.reshape(tf.transpose(input, [0, 3, 1, 2]),
+ [input_shape[0],
+ int(d),
+ tf.cast(
+ tf.cast(input_shape[1], tf.float32) / tf.cast(d, tf.float32) * tf.cast(
+ input_shape[3], tf.float32), tf.int32),
+ input_shape[2]
+ ]),
+ [0, 2, 3, 1], name=name)
+ else:
+ return tf.transpose(tf.reshape(tf.transpose(input, [0, 3, 1, 2]),
+ [input_shape[0],
+ int(d),
+ tf.cast(tf.cast(input_shape[1], tf.float32) * (
+ tf.cast(input_shape[3], tf.float32) / tf.cast(d, tf.float32)),
+ tf.int32),
+ input_shape[2]
+ ]),
+ [0, 2, 3, 1], name=name)
+
+ @layer
+ def reshape(self, input, shape, name):
+ return tf.reshape(input, shape=shape, name=name)
+
+ @layer
+ def spatial_reshape_layer(self, input, d, name):
+ input_shape = tf.shape(input)
+ # transpose: (1, H, W, A x d) -> (1, H, WxA, d)
+ return tf.reshape(input, \
+ [input_shape[0], \
+ input_shape[1], \
+ -1, \
+ int(d)])
+
+ @layer
+ def lrn(self, input, radius, alpha, beta, name, bias=1.0):
+ return tf.nn.local_response_normalization(input,
+ depth_radius=radius,
+ alpha=alpha,
+ beta=beta,
+ bias=bias,
+ name=name)
+
+ @layer
+ def concat(self, inputs, axis, name):
+ return tf.concat(axis=axis, values=inputs, name=name)
+
+ @layer
+ def flatten_data(self, input, name):
+ return tf.reshape(input, shape=[input.shape[0], -1], name=name)
+
+ @layer
+ def softmax(self, input, name):
+ input_shape = tf.shape(input)
+ if name == 'rpn_cls_prob':
+ return tf.reshape(tf.nn.softmax(tf.reshape(input, [-1, input_shape[3]])),
+ [-1, input_shape[1], input_shape[2], input_shape[3]], name=name)
+ else:
+ return tf.nn.softmax(input, name=name)
+
+ @layer
+ def spatial_softmax(self, input, name):
+ input_shape = tf.shape(input)
+ # d = input.get_shape()[-1]
+ return tf.reshape(tf.nn.softmax(tf.reshape(input, [-1, input_shape[3]])),
+ [-1, input_shape[1], input_shape[2], input_shape[3]], name=name)
+
+ @layer
+ def add(self, input, name):
+ """contribution by miraclebiu"""
+ return tf.add(input[0], input[1], name=name)
+
+ # The original
+ @layer
+    def batch_normalization(self, input, name, relu=True, dropout=None):
+        # jmfacil/cfernandez: dropout added based on pix2pix
+        is_training = self.is_training
+ if dropout is not None and is_training:
+ temp_layer = tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training,
+ scope=name)
+ if relu:
+ temp_layer = tf.nn.relu(temp_layer)
+ # output = tf.nn.dropout(temp_layer,dropout)
+ #return tf.nn.dropout(temp_layer,dropout)
+ return npu_ops.dropout(temp_layer, dropout)
+
+ """contribution by miraclebiu"""
+ if relu:
+ temp_layer = tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training,
+ scope=name)
+ # output = tf.nn.relu(temp_layer)
+ return tf.nn.relu(temp_layer)
+ else:
+ # output = tf.contrib.layers.batch_norm(input,scale=True,center=True,is_training=is_training,scope=name)
+ return tf.contrib.layers.batch_norm(input, scale=True, center=True, is_training=is_training, scope=name)
+
+ # ActivationSummary(output)
+ # return output
+
+ @layer
+ def batch_normalization0(self, input, name, relu=True, is_training=True, dropout=None, scale_offset=True,
+ decay=0.999):
+ is_training=self.is_training
+ shape = [input.get_shape()[-1]]
+ with tf.variable_scope(name, reuse=False) as scope:
+ if scale_offset:
+ scale = self.make_var('gamma', shape=shape,
+ initializer=self.filler(
+ {"type": "constant",
+ "value": 1.0}
+ )
+ )
+ offset = self.make_var('beta', shape=shape)
+ else:
+ scale, offset = (None, None)
+
+ pop_mean = self.make_var('moving_mean', shape=shape)
+ pop_var = self.make_var('moving_variance', shape=shape,
+ initializer=self.filler(
+ {"type": "constant",
+ "value": 1.0}
+ ),
+                                regularizer=None)
+
+ if is_training:
+ batch_mean, batch_var = tf.nn.moments(input, [0, 1, 2], name='moments')
+ train_mean = tf.assign(pop_mean,
+ pop_mean * decay + batch_mean * (1 - decay))
+ train_var = tf.assign(pop_var,
+ pop_var * decay + batch_var * (1 - decay))
+ with tf.control_dependencies([train_mean, train_var]):
+ epsilon = 1e-4
+ output = tf.nn.batch_normalization(input,
+ batch_mean, batch_var, offset, scale, epsilon)
+ else:
+ epsilon = 1e-4
+ output = tf.nn.batch_normalization(input, pop_mean, pop_var, offset, scale, epsilon)
+ # jmfacil/cfernandez: dropout added based on pix2pix
+ if dropout is not None and is_training:
+ # temp_layer=tf.contrib.layers.batch_norm(input,scale=True,center=True,is_training=is_training,scope=name)
+ # if relu:
+ # temp_layer = tf.nn.relu(temp_layer)
+ #output = tf.nn.dropout(output,dropout)
+ output = npu_ops.dropout(output,dropout)
+
+ """contribution by miraclebiu"""
+ if relu:
+ # temp_layer=tf.contrib.layers.batch_norm(input,scale=True,center=True,is_training=is_training,scope=name)
+ output = tf.nn.relu(output)
+ # else:
+ # return tf.contrib.layers.batch_norm(input,scale=True,center=True,is_training=is_training,scope=name)
+ return output
+
+ @layer
+ def scale(self, input, c_in, name):
+ with tf.variable_scope(name, reuse=False) as scope:
+ alpha = tf.get_variable('alpha', shape=[c_in, ], dtype=tf.float32,
+ initializer=tf.constant_initializer(1.0), trainable=True,
+ regularizer=self.l2_regularizer(0.00001))
+ beta = tf.get_variable('beta', shape=[c_in, ], dtype=tf.float32,
+ initializer=tf.constant_initializer(0.0), trainable=True,
+ regularizer=self.l2_regularizer(0.00001))
+ return tf.add(tf.multiply(input, alpha), beta)
+
+ @layer
+ def dropout(self, input, keep_prob, name):
+ # return tf.nn.dropout(input, keep_prob, name=name)
+        is_training = self.is_training
+        if is_training:
+            return npu_ops.dropout(input, keep_prob, name=name)
+        else:
+            # identity at inference time; returning None would break downstream ops
+            return input
+
+ def l2_regularizer(self, weight_decay=0.0005, scope=None):
+ def regularizer(tensor):
+ with tf.name_scope(scope, default_name='l2_regularizer', values=[tensor]):
+ l2_weight = tf.convert_to_tensor(weight_decay,
+ dtype=tensor.dtype.base_dtype,
+ name='weight_decay')
+ return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
+
+ return regularizer
+
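+    # Example (illustrative): passing self.l2_regularizer(args.weight_decay) as
+    # the regularizer of make_var applies weight_decay * l2_loss(weights) and
+    # adds the result to the graph's regularization losses.
+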
+ def smooth_l1_dist(self, deltas, sigma2=9.0, name='smooth_l1_dist'):
+ with tf.name_scope(name=name) as scope:
+ deltas_abs = tf.abs(deltas)
+ smoothL1_sign = tf.cast(tf.less(deltas_abs, 1.0 / sigma2), tf.float32)
+ return tf.square(deltas) * 0.5 * sigma2 * smoothL1_sign + \
+ (deltas_abs - 0.5 / sigma2) * tf.abs(smoothL1_sign - 1)
+
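+    # Piecewise form (for reference), with sigma2 = sigma^2:
+    #     0.5 * sigma2 * d^2      if |d| < 1 / sigma2
+    #     |d| - 0.5 / sigma2      otherwise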
+
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/README.md b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..307afcd65969c53fc690c2d241e417db1a910a8c
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/README.md
@@ -0,0 +1,171 @@
+# Basic Information
+
+**Publisher: Huawei**
+
+**Application Domain: Computer Vision**
+
+**Version: 1.1**
+
+**Modified: 2022.03.09**
+
+**Size: 117KB**
+
+**Framework: TensorFlow 1.15.0**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: Test code for recovering 3D room layouts from 360° panoramic images, based on the TensorFlow framework**
+
+# Model Overview
+
+CFL is a TensorFlow implementation of the paper "CFL: End-to-End Layout Recovery from 360 Images". The core idea of the paper is to recover the 3D layout of a 360° panoramic image, generating edge maps and corner maps, using either the StdConvs model or the EquiConvs model. Note that this script uses the StdConvs model.
+
+- Reference paper
+
+  [Corners for Layout: End-to-End Layout Recovery from 360 Images (cfernandezlab.github.io)](https://cfernandezlab.github.io/CFL/)
+
+- Reference implementation
+
+  [GitHub - cfernandezlab/CFL: Tensorflow implementation of our end-to-end model to recover 3D layouts. Also with equirectangular convolutions!](https://github.com/cfernandezlab/CFL)
+
+# Default Configuration
+
+- Test data preprocessing (using the SUN360 test set as an example; for reference only)
+  - Input image size: 128×256
+  - Input image format: jpg
+- Test hyperparameters
+  - Batch size: 16
+  - Test epoch: 1
+  - Test step: 72
+
+# Supported Features
+
+| Feature         | Supported |
+| :-------------: | :-------: |
+| Distributed     |    No     |
+| Mixed precision |    Yes    |
+
+# Mixed Precision
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers selected float32 operators in the network to float16, improving system performance and reducing memory usage with only a small loss of precision.
+
+Mixed precision is enabled by default in this script; the precision_mode parameter is set as follows.
+
+```python
+custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = "NpuOptimizer"
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+```
+
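+A fuller session configuration, as used by test_CFL.py in this repository, additionally disables TensorFlow's remapping and memory-optimization graph rewrites, which must be switched off explicitly when running on the NPU:
+
+```python
+import tensorflow as tf
+from npu_bridge.npu_init import *
+from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
+
+config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
+custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = "NpuOptimizer"
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
+```
+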
+# Environment Setup
+
+- For hardware setup, see the "[Driver and Firmware Installation and Upgrade Guide](https://gitee.com/link?target=https%3A%2F%2Fsupport.huawei.com%2Fenterprise%2Fzh%2Fcategory%2Fai-computing-platform-pid-1557196528909)" of your hardware product; the firmware and driver matching your CANN version must be installed on the device.
+- Install Docker on the host and log in to the [Ascend Hub](https://gitee.com/link?target=https%3A%2F%2Fascendhub.huawei.com%2F%23%2Fdetail%3Fname%3Dascend-tensorflow-arm) to obtain the container image.
+- Install the required Python dependencies:
+`pip install -r requirements.txt`
+
+
+# Quick Start
+
+Preparation before testing: the model uses the SUN360 dataset and the ckpt file obtained by training the CFL model (see the reference implementation); users need to obtain the dataset and the ckpt file themselves.
+
+# Model Testing
+
+- Click "Download Now" and choose a suitable method to download the source package.
+
+- Before launching the test, configure the environment variables required at runtime. See: [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/%E5%85%B6%E4%BB%96%E6%A1%88%E4%BE%8B/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE)
+
+- Single-card test
+
+  - Configure parameters
+
+    First configure data_path, output_path and the other parameters in the script test/train_full_1p.sh according to your actual paths, or pass them as arguments on the command line when launching the test.
+
+    ```bash
+    batch_size=16
+    data_path=./data_weights
+    output_path=./output
+    ```
+
+  - Launch the test
+
+    Launch the single-card test (script: test/train_full_1p.sh)
+
+    `bash test/train_full_1p.sh --data_path=./data_weights --output_path=./output`
+
+# Test Results
+
+- Accuracy comparison
+
+  - EDGES
+
+    | Metric    | Paper | GPU (measured) | NPU (measured) |
+    | :-------: | :---: | :------------: | :------------: |
+    | IoU       | 0.575 | 0.588          | 0.583          |
+    | Accuracy  | 0.931 | 0.933          | 0.931          |
+    | Precision | 0.789 | 0.782          | 0.818          |
+    | Recall    | 0.667 | 0.691          | 0.661          |
+    | f1 score  | 0.722 | 0.733          | 0.730          |
+
+  - CORNERS
+
+    | Metric    | Paper | GPU (measured) | NPU (measured) |
+    | :-------: | :---: | :------------: | :------------: |
+    | IoU       | 0.460 | 0.465          | 0.457          |
+    | Accuracy  | 0.974 | 0.974          | 0.974          |
+    | Precision | 0.887 | 0.872          | 0.885          |
+    | Recall    | 0.488 | 0.498          | 0.484          |
+    | f1 score  | 0.627 | 0.632          | 0.624          |
+
+
+
+# Advanced Reference
+
+##### File Description
+
+```text
+|--Models
+   |--__init__.py              //network initialization
+   |--CFL_StdConvs.py          //network construction
+   |--network.py               //network architecture
+|--test
+   |--train_full_1p.sh         //single-card full test launch script
+|--License                     //license
+|--README.md                   //documentation
+|--config.py                   //parameter settings
+|--modelarts_entry_acc.py      //test entry point
+|--modelzoo_level.txt          //network status
+|--requirements.txt            //python dependency list
+|--test_CFL.py                 //network test code
+|--output                      //directory for test results
+|--data_weights                //directory for the dataset and ckpt files
+ |--Datasets
+ |--SUN360
+ |--test
+ |--CM_gt
+ |--pano_0b9db1eaf8b73158dd047b8f810cf0cc_CM.jpg
+ ...
+ |--pano_azzfywvfwnlpcl_CM.jpg
+ |--EM_gt
+ |--pano_0b9db1eaf8b73158dd047b8f810cf0cc_EM.jpg
+ ...
+ |--pano_azzfywvfwnlpcl_EM.jpg
+ |--RGB
+ |--pano_0b9db1eaf8b73158dd047b8f810cf0cc.jpg
+ ...
+ |--pano_azzfywvfwnlpcl.jpg
+```
+
+##### Script Parameters
+
+```text
+--batch_size        batch size per NPU, default: 16
+--data_path         dataset path, default: ./data_weights
+--output_path       output path for results, default: ./output
+```
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/config.py b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4a138d0c30c381c080b52d62699c6527142a488
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/config.py
@@ -0,0 +1,89 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+from npu_bridge.npu_init import *
+
+## PATHS: change these before execution if needed.
+
+# Creates a directory in case it doesn't exist
+def check(dirname):
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ return dirname
+
+# The project directory
+CFL_DIR = os.path.dirname(os.path.realpath(__file__))
+
+# ---------------------------------------------------------------
+
+## Configuration of CFL
+
+# Mean color to subtract before propagating an image through a DNN
+MEAN_COLOR = [103.939, 116.779, 123.68]
+
+parser = argparse.ArgumentParser()
+
+# The dataset you want to train/test the model on
+parser.add_argument('--dataset', required=True, type=str, help='Path to dataset folders. It must contain RGB/, CM_gt/ and EM_gt/.')
+
+# CFL architecture
+parser.add_argument('--network', default='StdConvs', choices=['StdConvs','EquiConvs'], help='CFL architecture')
+
+# Path to weights
+parser.add_argument('--weights', required=True, help='Path to weights (e.g. weights/StdConvs.ckpt)')
+
+# Path to results folder
+parser.add_argument('--results', default=os.path.join(CFL_DIR, 'results/'), help='Path to results folder. The folder will be created if it does not exist.')
+
+# GPU to be used
+parser.add_argument('--gpu', default="0", help= 'GPU to be used')
+
+# Ignore missing params
+parser.add_argument('--ignore', action="store_true", default=False, help= 'Ignore missing params')
+
+# TEST config
+parser.add_argument("--im_height", default=128, type=int)
+parser.add_argument("--im_width", default=256, type=int)
+parser.add_argument("--im_ch", default=3, type=int)
+
+# TRAIN config
+parser.add_argument("--weight_decay", default=0.0005, type=int)
+
+# Modelarts
+parser.add_argument('--platform', default='modelarts', help='runtime platform, linux or modelarts')
+parser.add_argument('--chip', default='gpu', help='device identifier -- gpu, tpu or npu')
+
+parser.add_argument('--logdir', default='/tmp/data', help='directory for summaries and checkpoints.')
+parser.add_argument('--obs_dir', default='obs://eric-mt-net/log/g', help='OBS path for backing up logs and outputs')
+
+
+
+args = parser.parse_args()
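+
+# Example invocation (paths are illustrative; matches test/train_full_1p.sh):
+#   python3 test_CFL.py --dataset=./data_weights/Datasets/SUN360/test/ \
+#       --weights=./data_weights/Weights/StdConvs/model.ckpt \
+#       --results=./output/results --network=StdConvs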
+
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/modelarts_entry_acc.py b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/modelarts_entry_acc.py
new file mode 100644
index 0000000000000000000000000000000000000000..13077b10e660de32d6f7861257a50e1a01ede9ba
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/modelarts_entry_acc.py
@@ -0,0 +1,63 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import argparse
+import sys
+
+# Parse the data_url input argument
+parser = argparse.ArgumentParser()
+parser.add_argument("--data_url", type=str, default="/home/ma-user/modelarts/inputs/data_url_0")
+parser.add_argument("--train_url", type=str, default="/home/ma-user/modelarts/outputs/train_url_0/")
+config = parser.parse_args()
+
+print("[CANN-Modelzoo] code_dir path is [%s]" % (sys.path[0]))
+code_dir = sys.path[0]
+os.chdir(code_dir)
+print("[CANN-Modelzoo] work_dir path is [%s]" % (os.getcwd()))
+
+print("[CANN-Modelzoo] before train - list my run files:")
+os.system("ls -al /usr/local/Ascend/ascend-toolkit/")
+
+print("[CANN-Modelzoo] before train - list my dataset files:")
+os.system("ls -al %s" % config.data_url)
+
+print("[CANN-Modelzoo] start run train shell")
+# Convert the shell scripts to Unix format so they are executable on Linux
+os.system("dos2unix ./test/*")
+
+# Run train_full_1p.sh or train_performance_1p.sh, as specified by the user
+# Difference between full and performance: performance runs only a few steps (within 15 minutes) and focuses on throughput (FPS)
+os.system("bash ./test/train_full_1p.sh --data_path=%s --output_path=%s " % (config.data_url, config.train_url))
+
+print("[CANN-Modelzoo] finish run train shell")
+
+# Back up all files in the current working directory to the OBS output
+print("[CANN-Modelzoo] after train - list my output files:")
+os.system("cp -r %s %s " % (code_dir, config.train_url))
+os.system("ls -al %s" % config.train_url)
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/modelzoo_level.txt b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/modelzoo_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e34a411924aa7b6a688e60dd22cea2ea08a843f3
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/modelzoo_level.txt
@@ -0,0 +1,4 @@
+GPUStatus:OK
+NPUMigrationStatus:OK
+FuncStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/requirements.txt b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4b8d744da315fd22d382abf35a53edd1917ae4a0
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/requirements.txt
@@ -0,0 +1,34 @@
+absl-py==0.7.1
+astor==0.7.1
+certifi==2019.3.9
+chardet==3.0.4
+cycler==0.10.0
+Cython==0.29.6
+easydict==1.9
+gast==0.2.2
+graphviz==0.8.4
+grpcio==1.19.0
+h5py==2.9.0
+idna==2.8
+Keras-Applications==1.0.8
+Keras-Preprocessing==1.0.9
+kiwisolver==1.0.1
+Markdown==3.0.1
+matplotlib==3.0.3
+mock==2.0.0
+numpy==1.16.2
+opencv-python==4.0.0.21
+pbr==5.1.3
+Pillow==5.4.1
+protobuf==3.7.0
+pyparsing==2.3.1
+python-dateutil==2.8.0
+requests==2.21.0
+scipy==1.2.1
+six==1.12.0
+tensorboard==1.15.0
+tensorflow-estimator==1.15.1
+tensorflow-gpu==1.15.0
+termcolor==1.1.0
+urllib3==1.24.1
+Werkzeug==0.15.0
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/test/train_full_1p.sh b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/test/train_full_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..030e50c4deebcf1dca8b667f2f831cb320a943ff
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/test/train_full_1p.sh
@@ -0,0 +1,191 @@
+#!/bin/bash
+
+##########################################################
+#########Lines 3 through 100 must NOT be modified##########
+#########Lines 3 through 100 must NOT be modified##########
+#########Lines 3 through 100 must NOT be modified##########
+##########################################################
+# Directory containing this shell script
+cur_path=`echo $(cd $(dirname $0);pwd)`
+
+# Determine whether this script is the performance variant
+perf_flag=`echo $0 | grep performance | wc -l`
+
+# Name of the network being executed
+Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'`
+
+export RANK_SIZE=1
+export RANK_ID=0
+export JOB_ID=10087
+
+# Initialize path parameters
+data_path=""
+output_path=""
+
+# Help message; no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+ echo"usage:./train_performance_1P.sh "
+ echo " "
+ echo "parameter explain:
+ --data_path # dataset of training
+ --output_path # output of training
+ --train_steps # max_step for training
+ --train_epochs # max_epoch for training
+ --batch_size # batch size
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# Argument parsing; no modification needed
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --output_path* ]];then
+ output_path=`echo ${para#*=}`
+ elif [[ $para == --train_steps* ]];then
+ train_steps=`echo ${para#*=}`
+ elif [[ $para == --train_epochs* ]];then
+ train_epochs=`echo ${para#*=}`
+ elif [[ $para == --batch_size* ]];then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# Verify that data_path was provided; no modification needed
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be config"
+ exit 1
+fi
+
+# Verify that output_path was provided; no modification needed
+if [[ $output_path == "" ]];then
+ output_path="./test/output/${ASCEND_DEVICE_ID}"
+fi
+
+# Set the console log file name; keep this, the file name is ${print_log}
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+modelarts_flag=${MODELARTS_MODEL_PATH}
+if [ x"${modelarts_flag}" != x ];
+then
+ echo "running without etp..."
+ print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+ print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
+
+CaseName=""
+function get_casename()
+{
+ if [ x"${perf_flag}" = x1 ];
+ then
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf'
+ else
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc'
+ fi
+}
+
+# Change to the code directory
+cd ${cur_path}/../
+rm -rf ./test/output/${ASCEND_DEVICE_ID}
+mkdir -p ./test/output/${ASCEND_DEVICE_ID}
+
+# Record the training start time; no modification needed
+start_time=$(date +%s)
+##########################################################
+#########Lines 3 through 100 must NOT be modified##########
+#########Lines 3 through 100 must NOT be modified##########
+#########Lines 3 through 100 must NOT be modified##########
+##########################################################
+
+#=========================================================
+#=========================================================
+#========Training command: modify for your network=========
+#=========================================================
+#=========================================================
+# Basic parameters; review and modify for your model
+# Your training dataset is under ${data_path}; use this variable directly
+# Your training output directory is ${output_path}; use this variable directly
+# Other basic parameters may be added as needed, but keep batch_size and set it to the correct value
+batch_size=16
+
+if [ x"${modelarts_flag}" != x ];
+then
+ python3.7 ./test_CFL.py \
+ --dataset=${data_path}"/Datasets/SUN360/test/" \
+ --weights=${data_path}"/Weights/StdConvs/model.ckpt" \
+ --results=${output_path}"/results" \
+ --network="StdConvs"
+else
+ python3.7 ./test_CFL.py \
+ --dataset=${data_path}"/Datasets/SUN360/test/" \
+ --weights=${data_path}"/Weights/StdConvs/model.ckpt" \
+ --results=${output_path}"/results" \
+ --network="StdConvs" >${print_log}
+fi
+
+# Compute performance metrics
+StepTime=`grep "sec/step :" ${print_log} | tail -n 10 | awk '{print $NF}' | awk '{sum+=$1} END {print sum/NR}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'`
+
+# Compute accuracy metrics
+train_accuracy=`grep "Final Accuracy accuracy" ${print_log} | awk '{print $NF}'`
+# Extract all printed loss values
+grep "loss :" ${print_log} | awk -F ":" '{print $4}' | awk -F "-" '{print $1}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+
+
+###########################################################
+#########Do not modify anything below this point###########
+#########Do not modify anything below this point###########
+#########Do not modify anything below this point###########
+###########################################################
+
+# Check whether this run actually used the Ascend NPU
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 ];
+then
+ echo "------------------ ERROR NOTICE START ------------------"
+ echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration."
+ echo "------------------ ERROR NOTICE END------------------"
+else
+ echo "------------------ INFO NOTICE START------------------"
+ echo "INFO, your task have used Ascend NPU, please check your result."
+ echo "------------------ INFO NOTICE END------------------"
+fi
+
+# Get the final case name; keep this, the case file name is ${CaseName}
+get_casename
+
+# Rename the loss file
+if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ];
+then
+ mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt
+fi
+
+# End-to-end training time
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+echo "------------------ Final result ------------------"
+# Print performance: FPS / time per step / end-to-end time
+echo "Final Performance images/sec : $FPS"
+echo "Final Performance sec/step : $StepTime"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Print the training accuracy
+echo "Final Train Accuracy : ${train_accuracy}"
+
+# Loss value of the last iteration; no modification needed
+ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`)
+
+# Print key information to ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/test_CFL.py b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/test_CFL.py
new file mode 100644
index 0000000000000000000000000000000000000000..959d338fd9e3231d4c38c34fa827c96100cadd20
--- /dev/null
+++ b/TensorFlow/contrib/cv/CFL_ID1230_for_TensorFlow/test_CFL.py
@@ -0,0 +1,175 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import numpy as np
+import tensorflow as tf
+from matplotlib import pyplot as plt
+import imageio
+from PIL import Image
+import glob
+import time
+import math
+import os.path
+import Models
+from npu_bridge.npu_init import *
+from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
+
+from config import *
+
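+# preprocess reorders channels RGB -> BGR and subtracts the per-channel mean
+# (MEAN_COLOR in config.py, in BGR order), following the common VGG/Caffe
+# preprocessing convention.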
+def preprocess(img):
+ mean_color = [103.939, 116.779, 123.68]
+ r, g, b = tf.split(axis=3, num_or_size_splits=3, value=img)
+ bgr = tf.concat(values=[b - mean_color[0], g - mean_color[1], r - mean_color[2]], axis=3)
+ return bgr
+
+
+def evaluate(map_type):
+    if map_type == 'edges':
+        prediction_path_list = glob.glob(os.path.join(args.results, 'EM_test') + '/*.jpg')
+        gt_path_list = glob.glob(os.path.join(args.dataset, 'EM_gt') + '/*.jpg')
+    elif map_type == 'corners':
+        prediction_path_list = glob.glob(os.path.join(args.results, 'CM_test') + '/*.jpg')
+        gt_path_list = glob.glob(os.path.join(args.dataset, 'CM_gt') + '/*.jpg')
+ prediction_path_list.sort()
+ gt_path_list.sort()
+
+    P, R, Acc, f1, IoU = [], [], [], [], []
+ for im in range(len(prediction_path_list)):
+ # predicted image
+ prediction = Image.open(prediction_path_list[im])
+ pred_W, pred_H = prediction.size
+ prediction = np.array(prediction) / 255.
+ # gt image
+ gt = Image.open(gt_path_list[im])
+ gt = gt.resize([pred_W, pred_H])
+ gt = np.array(gt) / 255.
+ gt = (gt >= 0.01).astype(int)
+
+ th = 0.1
+ tp = np.sum(np.logical_and(gt == 1, prediction > th))
+ tn = np.sum(np.logical_and(gt == 0, prediction <= th))
+ fp = np.sum(np.logical_and(gt == 0, prediction > th))
+ fn = np.sum(np.logical_and(gt == 1, prediction <= th))
+
+ # How accurate the positive predictions are
+ P.append(tp / (tp + fp))
+ # Coverage of actual positive sample
+ R.append(tp / (tp + fn))
+ # Overall performance of model
+ Acc.append((tp + tn) / (tp + tn + fp + fn))
+ # Hybrid metric useful for unbalanced classes
+ f1.append(2 * (tp / (tp + fp)) * (tp / (tp + fn)) / ((tp / (tp + fp)) + (tp / (tp + fn))))
+ # Intersection over Union
+ IoU.append(tp / (tp + fp + fn))
+
+ return np.mean(P), np.mean(R), np.mean(Acc), np.mean(f1), np.mean(IoU)
+
+
+def predict(image_path_list):
+ rgb_ph1 = tf.compat.v1.placeholder(tf.float32, shape=(None, args.im_height, args.im_width, args.im_ch))
+ rgb_ph = preprocess(rgb_ph1)
+
+ net = Models.LayoutEstimator_StdConvs({'rgb_input': rgb_ph}, is_training=False)
+
+ saver = tf.train.Saver()
+ config = tf.ConfigProto(log_device_placement=False,allow_soft_placement=True)
+ custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = "NpuOptimizer"
+ custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+    config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # remapping must be disabled explicitly
+    config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF  # must be disabled explicitly
+ with tf.Session(config=config) as sess:
+
+ print('Loading the model')
+
+ saver.restore(sess, args.weights)
+
+ print('model loaded')
+
+        # Build the output ops once, outside the image loop, so the graph
+        # does not grow with every processed image
+        prediction = net.get_layer_output("output_likelihood")
+        pred_edges, pred_corners = tf.split(prediction, [1, 1], 3)
+        pred_edges = tf.nn.sigmoid(pred_edges)
+        pred_corners = tf.nn.sigmoid(pred_corners)
+
+        # Obtain network predictions
+        for image_path in image_path_list:
+
+            filename = os.path.basename(str(image_path))
+
+            img = Image.open(image_path)
+            img = img.resize([args.im_width, args.im_height], Image.ANTIALIAS)
+            img = np.array(img).astype('float32')
+            img = np.expand_dims(np.asarray(img), axis=0)
+
+            fd = net.fd_test
+            fd[rgb_ph1] = img
+
+            tt = time.time()
+            emap, cmap = sess.run([pred_edges, pred_corners], feed_dict=fd)
+            print("sec/step :", time.time() - tt)
+
+            # Save results
+            imageio.imwrite(os.path.join(args.results, 'EM_test', filename + "_emap.jpg"), emap[0, :, :, 0])
+            imageio.imwrite(os.path.join(args.results, 'CM_test', filename + "_cmap.jpg"), cmap[0, :, :, 0])
+
+
+
+
+def main():
+
+ t = time.time()
+
+ if not os.path.exists(os.path.join(args.results, 'EM_test')): os.makedirs(os.path.join(args.results, 'EM_test'))
+ if not os.path.exists(os.path.join(args.results, 'CM_test')): os.makedirs(os.path.join(args.results, 'CM_test'))
+    predict(glob.glob(os.path.join(args.dataset, 'RGB') + '/*.jpg'))
+    elapsed = time.time() - t
+    print('Total time in seconds:', elapsed)
+
+ ## Give metrics
+ P_e, R_e, Acc_e, f1_e, IoU_e = evaluate('edges')
+ print('EDGES: IoU: ' + str('%.3f' % IoU_e) + '; Accuracy: ' + str('%.3f' % Acc_e) + '; Precision: ' + str(
+ '%.3f' % P_e) + '; Recall: ' + str('%.3f' % R_e) + '; f1 score: ' + str('%.3f' % f1_e))
+ P_c, R_c, Acc_c, f1_c, IoU_c = evaluate('corners')
+ print('CORNERS: IoU: ' + str('%.3f' % IoU_c) + '; Accuracy: ' + str('%.3f' % Acc_c) + '; Precision: ' + str(
+ '%.3f' % P_c) + '; Recall: ' + str('%.3f' % R_c) + '; f1 score: ' + str('%.3f' % f1_c))
+
+ print("Final Accuracy accuracy :"+str('%.3f'%Acc_c))
+ # latex format
+ latex = [str('$%.3f$' % IoU_c) + " & " + str('$%.3f$' % Acc_c) + " & " + str('$%.3f$' % P_c) + " & " + str(
+ '$%.3f$' % R_c) + " & " + str('$%.3f$' % f1_c)]
+ print(latex)
+
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file