diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/.keep b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/LICENSE b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/README.md b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a4923fa2c3356c49e6db56150fafe977910fdfd9
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/README.md
@@ -0,0 +1,165 @@
+# cosine_metric_learning
+
+## Introduction
+
+This repository contains code for training a metric feature representation to be
+used with the [deep_sort tracker](https://github.com/nwojke/deep_sort). The
+approach is described in
+
+ @inproceedings{Wojke2018deep,
+ title={Deep Cosine Metric Learning for Person Re-identification},
+ author={Wojke, Nicolai and Bewley, Alex},
+ booktitle={2018 IEEE Winter Conference on Applications of Computer Vision (WACV)},
+ year={2018},
+ pages={748--756},
+ organization={IEEE},
+ doi={10.1109/WACV.2018.00087}
+ }
+
+Pre-trained models used in the paper can be found
+[here](https://drive.google.com/open?id=13HtkxD6ggcrGJLWaUcqgXl2UO6-p4PK0).
+A preprint of the paper is available [here](http://elib.dlr.de/116408/).
+The repository comes with code to train a model on the
+[Market1501](http://www.liangzheng.org/Project/project_reid.html)
+and [MARS](http://www.liangzheng.com.cn/Project/project_mars.html) datasets.
+
+## Training on Market1501
+
+The following description assumes you have downloaded the Market1501 dataset to
+``./Market-1501-v15.09.15``. The command below starts training
+with the cosine-softmax classifier described in the paper above:
+```
+python train_market1501.py \
+ --dataset_dir=./Market-1501-v15.09.15/ \
+ --loss_mode=cosine-softmax \
+ --log_dir=./output/market1501/ \
+ --run_id=cosine-softmax
+```
+This will create a directory `./output/market1501/cosine-softmax` where
+TensorFlow checkpoints are stored and which can be monitored using
+``tensorboard``:
+```
+tensorboard --logdir ./output/market1501/cosine-softmax --port 6006
+```
+The code splits off 10% of the training data for validation.
+While training runs, execute the following command to compute CMC evaluation
+metrics on the validation set:
+```
+CUDA_VISIBLE_DEVICES="" python train_market1501.py \
+ --mode=eval \
+ --dataset_dir=./Market-1501-v15.09.15/ \
+ --loss_mode=cosine-softmax \
+ --log_dir=./output/market1501/ \
+ --run_id=cosine-softmax \
+ --eval_log_dir=./eval_output/market1501
+```
+The command blocks indefinitely, monitoring the training directory for saved
+checkpoints; each stored checkpoint is evaluated on the
+validation set. The results of this evaluation are stored in
+``./eval_output/market1501/cosine-softmax`` to be monitored using
+``tensorboard``:
+```
+tensorboard --logdir ./eval_output/market1501/cosine-softmax --port 6007
+```
+
+## Training on MARS
+
+To train on MARS, download the
+[evaluation software](https://github.com/liangzheng06/MARS-evaluation) and
+extract ``bbox_train.zip`` and ``bbox_test.zip`` from the
+[dataset website](http://www.liangzheng.com.cn/Project/project_mars.html)
+into the evaluation software directory. The following description assumes they
+are stored in ``./MARS-evaluation-master/bbox_train`` and
+``./MARS-evaluation-master/bbox_test``. Training can be started with the following
+command:
+```
+python train_mars.py \
+ --dataset_dir=./MARS-evaluation-master \
+ --loss_mode=cosine-softmax \
+ --log_dir=./output/mars/ \
+ --run_id=cosine-softmax
+```
+Again, this will create a directory `./output/mars/cosine-softmax` where
+TensorFlow checkpoints are stored and which can be monitored using
+``tensorboard``:
+```
+tensorboard --logdir ./output/mars/cosine-softmax --port 7006
+```
+As for Market1501, 10% of the training data is split off for validation.
+While training runs, execute the following command to compute CMC evaluation
+metrics on the validation set:
+```
+CUDA_VISIBLE_DEVICES="" python train_mars.py \
+ --mode=eval \
+ --dataset_dir=./MARS-evaluation-master/ \
+ --loss_mode=cosine-softmax \
+ --log_dir=./output/mars/ \
+ --run_id=cosine-softmax \
+ --eval_log_dir=./eval_output/mars
+```
+Evaluation metrics on the validation set can be monitored with ``tensorboard``
+```
+tensorboard --logdir ./eval_output/mars/cosine-softmax
+```
+
+## Testing
+
+Final model testing has been carried out using evaluation software provided by
+the dataset authors. The training scripts can be used to write features of the
+test split. The following command exports MARS test features to
+``./MARS-evaluation-master/feat_test.mat``
+```
+python train_mars.py \
+ --mode=export \
+ --dataset_dir=./MARS-evaluation-master \
+    --loss_mode=cosine-softmax \
+ --restore_path=PATH_TO_CHECKPOINT
+```
+where ``PATH_TO_CHECKPOINT`` is the checkpoint file to evaluate. Note that the
+evaluation script needs minor adjustments to apply the cosine similarity metric.
+More precisely, change the feature computation in
+``utils/process_box_features.m`` to average pooling (line 8) and apply
+a re-normalization at the end of the file. The modified file should look like
+this:
+```
+function video_feat = process_box_feat(box_feat, video_info)
+
+nVideo = size(video_info, 1);
+video_feat = zeros(size(box_feat, 1), nVideo);
+for n = 1:nVideo
+ feature_set = box_feat(:, video_info(n, 1):video_info(n, 2));
+% video_feat(:, n) = max(feature_set, [], 2); % max pooling
+ video_feat(:, n) = mean(feature_set, 2); % avg pooling
+end
+
+%%% normalize train and test features
+sum_val = sqrt(sum(video_feat.^2));
+for n = 1:size(video_feat, 1)
+ video_feat(n, :) = video_feat(n, :)./sum_val;
+end
+```
+The Market1501 script contains a similar export functionality which can be
+applied in the same way as described for MARS:
+```
+python train_market1501.py \
+ --mode=export \
+    --dataset_dir=./Market-1501-v15.09.15/ \
+    --sdk_dir=./Market-1501_baseline-v16.01.14/ \
+ --loss_mode=cosine-softmax \
+ --restore_path=PATH_TO_CHECKPOINT
+```
+This command creates ``./Market-1501_baseline-v16.01.14/feat_query.mat`` and
+``./Market-1501_baseline-v16.01.14/feat_test.mat`` to be used with the
+Market1501 evaluation code.
+
+## Model export
+
+To export your trained model for use with the
+[deep_sort tracker](https://github.com/nwojke/deep_sort), run the following
+command:
+```
+python train_mars.py --mode=freeze --restore_path=PATH_TO_CHECKPOINT
+```
+This will create a ``mars.pb`` file which can be supplied to Deep SORT. Again,
+the Market1501 script contains a similar function.
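+
+For reference, a frozen graph of this kind can be loaded with plain TensorFlow 1.x as
+in the following minimal sketch. The tensor names ``images:0`` and ``features:0`` and
+the 128x64 input size are assumptions based on the deep_sort feature encoder; adjust
+them to match your exported graph:
+```
+import numpy as np
+import tensorflow as tf
+
+# Load the frozen graph definition produced by the freeze mode.
+with tf.gfile.GFile("mars.pb", "rb") as file_handle:
+    graph_def = tf.GraphDef()
+    graph_def.ParseFromString(file_handle.read())
+
+with tf.Graph().as_default() as graph:
+    tf.import_graph_def(graph_def, name="net")
+
+# Assumed tensor names; inspect the graph if your export differs.
+input_var = graph.get_tensor_by_name("net/images:0")
+output_var = graph.get_tensor_by_name("net/features:0")
+
+with tf.Session(graph=graph) as sess:
+    # One dummy 128x64 BGR crop; replace this with real person crops.
+    batch = np.zeros((1, 128, 64, 3), np.uint8)
+    features = sess.run(output_var, feed_dict={input_var: batch})
+    print(features.shape)
+```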
+## kernel_meta, ckpt and resources (three directories not included in this repository)
\ No newline at end of file
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/boot_modelarts.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/boot_modelarts.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8e0663c467887ed19065b832c6552f9310c924e
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/boot_modelarts.py
@@ -0,0 +1,72 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This is the boot file for the ModelArts platform.
+First, the training dataset is copied from OBS to the ModelArts container.
+Then, the training shell command is assembled as a string and executed via 'os.system()'.
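+
+A typical launch looks like the following (the bucket paths are illustrative only):
+    python boot_modelarts.py --data_url=s3://bucket/dataset --train_url=s3://bucket/output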
+"""
+import os
+import numpy as np
+import argparse
+from help_modelarts import obs_data2modelarts
+from cfg import make_config
+print(os.system('env'))
+
+
+import moxing as mox
+# Parse the input arguments data_url and train_url passed in by ModelArts.
+
+
+
+if __name__ == '__main__':
+ ## Note: the code dir is not the same as work dir on ModelArts Platform!!!
+ code_dir = os.path.dirname(__file__)
+ work_dir = os.getcwd()
+ print("===>>>code_dir:{}, work_dir:{}".format(code_dir, work_dir))
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--data_url", type=str, default="./dataset")
+    parser.add_argument("--train_url", type=str, default="./output")
+    parser.add_argument("--modelarts_data_dir", type=str, default="/cache/dataset")
+    parser.add_argument("--modelarts_result_dir", type=str, default="/cache/result")
+    config = parser.parse_args()
+    # Create the dataset and result directories inside the ModelArts container.
+    data_dir = config.modelarts_data_dir
+    model_dir = config.modelarts_result_dir
+    os.makedirs(data_dir)
+    os.makedirs(model_dir)
+    # Copy the dataset from OBS into the ModelArts container.
+    mox.file.copy_parallel(config.data_url, data_dir)
+    # Sync the (currently empty) result directory to the OBS output path.
+    mox.file.copy_parallel(model_dir, config.train_url)
+
+
+ print("--------config----------")
+ for k in list(vars(config).keys()):
+ print("key:{}: value:{}".format(k, vars(config)[k]))
+ print("--------config----------")
+
+ ## copy dataset from obs to modelarts
+ obs_data2modelarts(config)
+
+ ## start to train on Modelarts platform
+ if not os.path.exists(config.modelarts_result_dir):
+ os.makedirs(config.modelarts_result_dir)
+ bash_header = os.path.join(code_dir, 'scripts/run_1p.sh')
+    # run_1p.sh receives the code, data and result directories as positional arguments.
+    arg_url = '%s %s %s' % (code_dir, config.modelarts_data_dir, config.modelarts_result_dir)
+ bash_command = 'bash %s %s' % (bash_header, arg_url)
+ print("bash command:", bash_command)
+ os.system(bash_command)
\ No newline at end of file
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/cfg.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/cfg.py
new file mode 100644
index 0000000000000000000000000000000000000000..a07258f7a6a7b4598581e263904b8bb34fd95906
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/cfg.py
@@ -0,0 +1,46 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Define the TensorFlow session configuration here.
+The configuration differs depending on the target chip (CPU, GPU, or NPU).
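+
+Typical usage (a minimal sketch; ``FLAGS.chip`` is assumed to be one of
+'cpu', 'gpu' or 'npu'):
+
+    config = make_config(FLAGS)
+    with tf.Session(config=config) as sess:
+        ...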
+"""
+import tensorflow as tf
+import os
+
+
+def make_config(FLAGS):
+ chip = FLAGS.chip.lower()
+ tf.logging.info("chip is [%s]", chip)
+
+ if chip == 'cpu':
+ config = tf.ConfigProto()
+ elif chip == 'gpu':
+ config = tf.ConfigProto(allow_soft_placement=True)
+ config.gpu_options.allow_growth = True
+ elif chip == 'npu':
+ from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
+
+ config = tf.ConfigProto()
+ custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = "NpuOptimizer"
+ config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+        config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF  # must be explicitly disabled
+ ## Performance Profiling
+ ## refer to link:https://support.huaweicloud.com/Development-tg-cann202training1/atlasprofilingtrain_16_0003.html
+
+
+ else:
+        raise RuntimeError('chip [%s] is not supported' % chip)
+
+ return config
\ No newline at end of file
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/__init__.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..43e08fb8a989d9442405ec7dc442df4ff7ba6d5c
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/__init__.py
@@ -0,0 +1 @@
+# vim: expandtab:ts=4:sw=4
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/market1501.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/market1501.py
new file mode 100644
index 0000000000000000000000000000000000000000..65b31a43868796374898a2ec71a5adf300dbaf9e
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/market1501.py
@@ -0,0 +1,214 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import os
+import numpy as np
+import cv2
+import scipy.io as sio
+
+
+# The maximum person ID in the dataset.
+MAX_LABEL = 1501
+
+IMAGE_SHAPE = 128, 64, 3
+
+
+def _parse_filename(filename):
+ """Parse meta-information from given filename.
+
+ Parameters
+ ----------
+ filename : str
+ A Market 1501 image filename.
+
+ Returns
+ -------
+ (int, int, str, str) | NoneType
+ Returns a tuple with the following entries:
+
+ * Unique ID of the individual in the image
+ * Index of the camera which has observed the individual
+ * Filename without extension
+ * File extension
+
+ Returns None if the given filename is not a valid filename.
+
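+    For example, a typical Market-1501 filename such as
+    ``0002_c1s1_000451_03.jpg`` parses to ``(2, 1, "0002_c1s1_000451_03", ".jpg")``.
+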
+ """
+ filename_base, ext = os.path.splitext(filename)
+ if '.' in filename_base:
+ # Some images have double filename extensions.
+ filename_base, ext = os.path.splitext(filename_base)
+ if ext != ".jpg":
+ return None
+ person_id, cam_seq, frame_idx, detection_idx = filename_base.split('_')
+ return int(person_id), int(cam_seq[1]), filename_base, ext
+
+
+def read_train_split_to_str(dataset_dir):
+ """Read training data to list of filenames.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the Market 1501 dataset directory.
+
+ Returns
+ -------
+ (List[str], List[int], List[int])
+ Returns a tuple with the following values:
+
+ * List of image filenames (full path to image files).
+ * List of unique IDs for the individuals in the images.
+ * List of camera indices.
+
+ """
+ filenames, ids, camera_indices = [], [], []
+
+ image_dir = os.path.join(dataset_dir, "bounding_box_train")
+ for filename in sorted(os.listdir(image_dir)):
+ meta_data = _parse_filename(filename)
+ if meta_data is None:
+ # This is not a valid filename (e.g., Thumbs.db).
+ continue
+
+ filenames.append(os.path.join(image_dir, filename))
+ ids.append(meta_data[0])
+ camera_indices.append(meta_data[1])
+
+ return filenames, ids, camera_indices
+
+
+def read_train_split_to_image(dataset_dir):
+ """Read training images to memory. This consumes a lot of memory.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the Market 1501 dataset directory.
+
+ Returns
+ -------
+ (ndarray, ndarray, ndarray)
+ Returns a tuple with the following values:
+
+ * Tensor of images in BGR color space of shape 128x64x3.
+ * One dimensional array of unique IDs for the individuals in the images.
+ * One dimensional array of camera indices.
+
+ """
+ filenames, ids, camera_indices = read_train_split_to_str(dataset_dir)
+
+ images = np.zeros((len(filenames), 128, 64, 3), np.uint8)
+ for i, filename in enumerate(filenames):
+ images[i] = cv2.imread(filename, cv2.IMREAD_COLOR)
+
+ ids = np.asarray(ids, np.int64)
+ camera_indices = np.asarray(camera_indices, np.int64)
+ return images, ids, camera_indices
+
+
+def read_test_split_to_str(dataset_dir):
+ """Read query and gallery data to list of filenames.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the Market 1501 dataset directory.
+
+ Returns
+ -------
+ (List[str], List[int], List[str], List[int], ndarray)
+ Returns a tuple with the following values:
+
+ * List of N gallery filenames (full path to image files).
+ * List of N unique IDs for the individuals in the gallery.
+ * List of M query filenames (full path to image files).
+ * List of M unique IDs for the individuals in the queries.
+ * Matrix of shape MxN such that element (i, j) evaluates to 0 if
+ gallery image j should be excluded from metrics computation of
+ query i and 1 otherwise.
+
+ """
+ # Read gallery.
+ gallery_filenames, gallery_ids = [], []
+
+ image_dir = os.path.join(dataset_dir, "bounding_box_test")
+ for filename in sorted(os.listdir(image_dir)):
+ meta_data = _parse_filename(filename)
+ if meta_data is None:
+ # This is not a valid filename (e.g., Thumbs.db).
+ continue
+
+ gallery_filenames.append(os.path.join(image_dir, filename))
+ gallery_ids.append(meta_data[0])
+
+ # Read queries.
+ query_filenames, query_ids, query_junk_indices = [], [], []
+
+ image_dir = os.path.join(dataset_dir, "query")
+ for filename in sorted(os.listdir(image_dir)):
+ meta_data = _parse_filename(filename)
+ if meta_data is None:
+ # This is not a valid filename (e.g., Thumbs.db).
+ continue
+
+ filename_base = meta_data[2]
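+        # Market-1501 ships a gt_query directory with per-query _junk.mat files
+        # listing gallery indices to ignore during evaluation.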
+ junk_matfile = filename_base + "_junk.mat"
+ mat = sio.loadmat(os.path.join(dataset_dir, "gt_query", junk_matfile))
+ if np.any(mat["junk_index"] < 1):
+ indices = []
+ else:
+ # MATLAB to Python index.
+ indices = list(mat["junk_index"].astype(np.int64).ravel() - 1)
+
+ query_junk_indices.append(indices)
+ query_filenames.append(os.path.join(image_dir, filename))
+ query_ids.append(meta_data[0])
+
+ # The following matrix maps from query (row) to gallery image (column) such
+ # that element (i, j) evaluates to 0 if query i and gallery image j should
+ # be excluded from computation of the evaluation metrics and 1 otherwise.
+ good_mask = np.ones(
+ (len(query_filenames), len(gallery_filenames)), np.float32)
+ for i, junk_indices in enumerate(query_junk_indices):
+ good_mask[i, junk_indices] = 0.
+
+ return gallery_filenames, gallery_ids, query_filenames, query_ids, good_mask
+
+
+def read_test_split_to_image(dataset_dir):
+ """Read query and gallery data to memory. This consumes a lot of memory.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the Market 1501 dataset directory.
+
+ Returns
+ -------
+ (ndarray, ndarray, ndarray, ndarray, ndarray)
+ Returns a tuple with the following values:
+
+ * Tensor of shape Nx128x64x3 of N gallery images in BGR color space.
+ * One dimensional array of N unique gallery IDs.
+ * Tensor of shape Mx128x64x3 of M query images in BGR color space.
+ * One dimensional array of M unique query IDs.
+ * Matrix of shape MxN such that element (i, j) evaluates to 0 if
+ gallery image j should be excluded from metrics computation of
+ query i and 1 otherwise.
+
+ """
+ gallery_filenames, gallery_ids, query_filenames, query_ids, good_mask = (
+ read_test_split_to_str(dataset_dir))
+
+ gallery_images = np.zeros((len(gallery_filenames), 128, 64, 3), np.uint8)
+ for i, filename in enumerate(gallery_filenames):
+ gallery_images[i] = cv2.imread(filename, cv2.IMREAD_COLOR)
+
+ query_images = np.zeros((len(query_filenames), 128, 64, 3), np.uint8)
+ for i, filename in enumerate(query_filenames):
+ query_images[i] = cv2.imread(filename, cv2.IMREAD_COLOR)
+
+ gallery_ids = np.asarray(gallery_ids, np.int64)
+ query_ids = np.asarray(query_ids, np.int64)
+ return gallery_images, gallery_ids, query_images, query_ids, good_mask
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/mars.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/mars.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f7d117ed55ad5aa4e3214591fb6debd8aed35a3
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/mars.py
@@ -0,0 +1,190 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import os
+import numpy as np
+import cv2
+
+
+# The maximum person ID in the dataset.
+MAX_LABEL = 1500
+
+IMAGE_SHAPE = 256, 128, 3
+
+
+def read_train_test_directory_to_str(directory):
+ """Read bbox_train/bbox_test directory.
+
+ Parameters
+ ----------
+ directory : str
+ Path to bbox_train/bbox_test directory.
+
+ Returns
+ -------
+ (List[str], List[int], List[int], List[int])
+ Returns a tuple with the following entries:
+
+ * List of image filenames.
+ * List of corresponding unique IDs for the individuals in the images.
+ * List of camera indices.
+ * List of tracklet indices.
+
+ """
+
+ def to_label(x):
+ return int(x) if x.isdigit() else -1
+
+ dirnames = os.listdir(directory)
+ image_filenames, ids, camera_indices, tracklet_indices = [], [], [], []
+ for dirname in dirnames:
+ filenames = os.listdir(os.path.join(directory, dirname))
+ filenames = [
+ f for f in filenames if os.path.splitext(f)[1] == ".jpg"]
+ image_filenames += [
+ os.path.join(directory, dirname, f) for f in filenames]
+ ids += [to_label(dirname) for _ in filenames]
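+        # Filenames are assumed to follow the MARS pattern, e.g. 0065C1T0002F0052.jpg:
+        # character 5 holds the camera index and characters 7-10 the tracklet index.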
+ camera_indices += [int(f[5]) for f in filenames]
+ tracklet_indices += [int(f[7:11]) for f in filenames]
+
+ return image_filenames, ids, camera_indices, tracklet_indices
+
+
+def read_train_test_directory_to_image(directory, image_shape=(128, 64)):
+ """Read images in bbox_train/bbox_test directory.
+
+ Parameters
+ ----------
+ directory : str
+ Path to bbox_train/bbox_test directory.
+ image_shape : Tuple[int, int]
+ A tuple (height, width) of the desired image size.
+
+ Returns
+ -------
+ (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
+ Returns a tuple with the following entries:
+
+ * Tensor of images in BGR color space.
+ * One dimensional array of unique IDs for the individuals in the images.
+ * One dimensional array of camera indices.
+ * One dimensional array of tracklet indices.
+
+ """
+ reshape_fn = (
+ (lambda x: x) if image_shape == IMAGE_SHAPE[:2]
+ else (lambda x: cv2.resize(x, image_shape[::-1])))
+
+ filenames, ids, camera_indices, tracklet_indices = (
+ read_train_test_directory_to_str(directory))
+
+ images = np.zeros((len(filenames), ) + image_shape + (3, ), np.uint8)
+ for i, filename in enumerate(filenames):
+ if i % 1000 == 0:
+ print("Reading %s, %d / %d" % (directory, i, len(filenames)))
+ image = cv2.imread(filename, cv2.IMREAD_COLOR)
+ images[i] = reshape_fn(image)
+ ids = np.asarray(ids, dtype=np.int64)
+ camera_indices = np.asarray(camera_indices, dtype=np.int64)
+ tracklet_indices = np.asarray(tracklet_indices, dtype=np.int64)
+ return images, ids, camera_indices, tracklet_indices
+
+
+def read_train_split_to_str(dataset_dir):
+ """Read training data to list of filenames.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the MARS dataset directory; ``bbox_train`` should be a
+ subdirectory of this folder.
+
+ Returns
+ -------
+ (List[str], List[int], List[int], List[int])
+ Returns a tuple with the following entries:
+
+ * List of image filenames.
+ * List of corresponding unique IDs for the individuals in the images.
+ * List of camera indices.
+ * List of tracklet indices.
+
+ """
+ train_dir = os.path.join(dataset_dir, "bbox_train")
+ return read_train_test_directory_to_str(train_dir)
+
+
+def read_train_split_to_image(dataset_dir, image_shape=(128, 64)):
+ """Read training images to memory. This consumes a lot of memory.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the MARS dataset directory; ``bbox_train`` should be a
+ subdirectory of this folder.
+ image_shape : Tuple[int, int]
+ A tuple (height, width) of the desired image size.
+
+ Returns
+ -------
+ (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
+ Returns a tuple with the following entries:
+
+ * Tensor of images in BGR color space.
+ * One dimensional array of unique IDs for the individuals in the images.
+ * One dimensional array of camera indices.
+ * One dimensional array of tracklet indices.
+
+ """
+ train_dir = os.path.join(dataset_dir, "bbox_train")
+ return read_train_test_directory_to_image(train_dir, image_shape)
+
+
+def read_test_split_to_str(dataset_dir):
+    """Read test data to list of filenames.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the MARS dataset directory; ``bbox_test`` should be a
+ subdirectory of this folder.
+
+ Returns
+ -------
+ (List[str], List[int], List[int], List[int])
+ Returns a tuple with the following entries:
+
+ * List of image filenames.
+ * List of corresponding unique IDs for the individuals in the images.
+ * List of camera indices.
+ * List of tracklet indices.
+
+ """
+ test_dir = os.path.join(dataset_dir, "bbox_test")
+ return read_train_test_directory_to_str(test_dir)
+
+
+def read_test_split_to_image(dataset_dir, image_shape=(128, 64)):
+ """Read test images to memory. This consumes a lot of memory.
+
+ Parameters
+ ----------
+ dataset_dir : str
+ Path to the MARS dataset directory; ``bbox_test`` should be a
+ subdirectory of this folder.
+ image_shape : Tuple[int, int]
+ A tuple (height, width) of the desired image size.
+
+ Returns
+ -------
+ (np.ndarray, np.ndarray, np.ndarray, np.ndarray)
+ Returns a tuple with the following entries:
+
+ * Tensor of images in BGR color space.
+ * One dimensional array of unique IDs for the individuals in the images.
+ * One dimensional array of camera indices.
+ * One dimensional array of tracklet indices.
+
+ """
+ test_dir = os.path.join(dataset_dir, "bbox_test")
+ return read_train_test_directory_to_image(test_dir, image_shape)
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/util.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..5762ff7b237b335388acc62cbdcf65590f2ad921
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/datasets/util.py
@@ -0,0 +1,173 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import numpy as np
+import cv2
+
+
+def crop_to_shape(images, patch_shape):
+ """Crop images to desired shape, respecting the target aspect ratio.
+
+ Parameters
+ ----------
+ images : List[ndarray]
+ A list of images in BGR format (dtype np.uint8)
+ patch_shape : (int, int)
+ Target image patch shape (height, width).
+
+ Returns
+ -------
+ ndarray
+ A tensor of output images.
+
+ """
+ assert len(images) > 0, "Empty image list is not allowed."
+    channels = () if len(images[0].shape) == 2 else (images[0].shape[-1], )
+ output_images = np.zeros(
+ (len(images), ) + patch_shape + channels, dtype=np.uint8)
+
+ target_aspect_ratio = float(patch_shape[1]) / patch_shape[0]
+ for i, image in enumerate(images):
+ image_aspect_ratio = float(image.shape[1]) / image.shape[0]
+ if target_aspect_ratio > image_aspect_ratio:
+ # Fix width, modify height.
+ crop_height = image.shape[1] / target_aspect_ratio
+ crop_width = image.shape[1]
+ else:
+ # Fix height, modify width.
+ crop_width = target_aspect_ratio * image.shape[0]
+ crop_height = image.shape[0]
+
+ sx = int((image.shape[1] - crop_width) / 2)
+ sy = int((image.shape[0] - crop_height) / 2)
+ ex = int(min(sx + crop_width, image.shape[1]))
+ ey = int(min(sy + crop_height, image.shape[0]))
+ output_images[i, ...] = cv2.resize(
+ image[sy:ey, sx:ex], patch_shape[::-1],
+ interpolation=cv2.INTER_CUBIC)
+
+ return output_images
+
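+# A minimal usage sketch (hypothetical variable names): given a list of
+# detection crops of varying sizes, produce a uniform 128x64 patch tensor for
+# the feature network:
+#
+#     patches = crop_to_shape(detection_crops, patch_shape=(128, 64))
+#     assert patches.shape[1:] == (128, 64, 3)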
+
+def create_validation_split(data_y, num_validation_y, seed=None):
+ """Split dataset into training and validation set with disjoint classes.
+
+ Parameters
+ ----------
+ data_y : ndarray
+ A label vector.
+ num_validation_y : int | float
+ The number of identities to split off for validation. If an integer
+ is given, this value should be at least 1 and is interpreted as absolute
+ number of validation identities. If a float is given, this value should
+ be in [0, 1[ and is interpreted as fraction of validation identities.
+ seed : Optional[int]
+        A random generator seed used to select the validation identities.
+
+ Returns
+ -------
+ (ndarray, ndarray)
+ Returns indices of training and validation set.
+
+ """
+ unique_y = np.unique(data_y)
+ if isinstance(num_validation_y, float):
+ num_validation_y = int(num_validation_y * len(unique_y))
+
+ random_generator = np.random.RandomState(seed=seed)
+ validation_y = random_generator.choice(
+ unique_y, num_validation_y, replace=False)
+
+ validation_mask = np.full((len(data_y), ), False, bool)
+ for y in validation_y:
+ validation_mask = np.logical_or(validation_mask, data_y == y)
+ training_mask = np.logical_not(validation_mask)
+ return np.where(training_mask)[0], np.where(validation_mask)[0]
+
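+# Usage sketch (assumed label vector `train_y` and image array `images`): hold
+# out 10% of the identities for validation with a fixed seed:
+#
+#     train_idx, val_idx = create_validation_split(train_y, 0.1, seed=42)
+#     train_x, val_x = images[train_idx], images[val_idx]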
+
+def limit_num_elements_per_identity(data_y, max_num_images_per_id, seed=None):
+ """Limit the number of elements per identity to `max_num_images_per_id`.
+
+ Parameters
+ ----------
+ data_y : ndarray
+ A label vector.
+ max_num_images_per_id : int
+ The maximum number of elements per identity that should remain in
+ the data set.
+ seed : Optional[int]
+ Random generator seed.
+
+ Returns
+ -------
+ ndarray
+        A boolean mask that evaluates to True if the corresponding element
+        should remain in the data set.
+
+ """
+ random_generator = np.random.RandomState(seed=seed)
+ valid_mask = np.full((len(data_y), ), False, bool)
+ for y in np.unique(data_y):
+ indices = np.where(data_y == y)[0]
+ num_select = min(len(indices), max_num_images_per_id)
+ indices = random_generator.choice(indices, num_select, replace=False)
+ valid_mask[indices] = True
+ return valid_mask
+
+
+def create_cmc_probe_and_gallery(data_y, camera_indices=None, seed=None):
+ """Create probe and gallery images for evaluation of CMC top-k statistics.
+
+ For every identity, this function selects one image as probe and one image
+ for the gallery. Cross-view validation is performed when multiple cameras
+ are given.
+
+ Parameters
+ ----------
+ data_y : ndarray
+ Vector of data labels.
+ camera_indices : Optional[ndarray]
+ Optional array of camera indices. If possible, probe and gallery images
+ are selected from different cameras (i.e., cross-view validation).
+ If None given, assumes all images are taken from the same camera.
+ seed : Optional[int]
+ The random seed used to select probe and gallery images.
+
+ Returns
+ -------
+ (ndarray, ndarray)
+ Returns a tuple of indices to probe and gallery images.
+
+ """
+ data_y = np.asarray(data_y)
+ if camera_indices is None:
+        camera_indices = np.zeros_like(data_y, dtype=np.int64)
+ camera_indices = np.asarray(camera_indices)
+
+ random_generator = np.random.RandomState(seed=seed)
+ unique_y = np.unique(data_y)
+ probe_indices, gallery_indices = [], []
+ for y in unique_y:
+ mask_y = data_y == y
+
+ unique_cameras = np.unique(camera_indices[mask_y])
+ if len(unique_cameras) == 1:
+ # If we have only one camera, take any two images from this device.
+ c = unique_cameras[0]
+ indices = np.where(np.logical_and(mask_y, camera_indices == c))[0]
+ if len(indices) < 2:
+ continue # Cannot generate a pair for this identity.
+ i1, i2 = random_generator.choice(indices, 2, replace=False)
+ else:
+ # If we have multiple cameras, take images of two (randomly chosen)
+ # different devices.
+ c1, c2 = random_generator.choice(unique_cameras, 2, replace=False)
+ indices1 = np.where(np.logical_and(mask_y, camera_indices == c1))[0]
+ indices2 = np.where(np.logical_and(mask_y, camera_indices == c2))[0]
+ i1 = random_generator.choice(indices1)
+ i2 = random_generator.choice(indices2)
+
+ probe_indices.append(i1)
+ gallery_indices.append(i2)
+
+ return np.asarray(probe_indices), np.asarray(gallery_indices)
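+
+# Usage sketch (assumed arrays `test_y` and `test_cameras`): draw one probe
+# and one gallery index per identity, preferring different cameras:
+#
+#     probe_idx, gallery_idx = create_cmc_probe_and_gallery(
+#         test_y, camera_indices=test_cameras, seed=0)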
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/help_modelarts.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/help_modelarts.py
new file mode 100644
index 0000000000000000000000000000000000000000..edd3b80fc4b8456c80ebec9076124ff5e29b1612
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/help_modelarts.py
@@ -0,0 +1,80 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import datetime
+import moxing as mox
+
+def obs_data2modelarts(config):
+ """
+    Copy training data from OBS to ModelArts using the moxing API.
+ """
+ start = datetime.datetime.now()
+ print("===>>>Copy files from obs:{} to modelarts dir:{}".format(config.data_url, config.modelarts_data_dir))
+ mox.file.copy_parallel(src_url=config.data_url, dst_url=config.modelarts_data_dir)
+ end = datetime.datetime.now()
+ print("===>>>Copy from obs to modelarts, time use:{}(s)".format((end - start).seconds))
+ files = os.listdir(config.modelarts_data_dir)
+ print("===>>>Files:", files)
+
+
+def modelarts_result2obs(FLAGS):
+ """
+    Copy debug data from ModelArts to OBS.
+    According to the switch flags, the debug data may contain the auto tune
+    repository, dump data for precision comparison, and even the computation
+    graph and profiling data.
+ """
+ work_dir = os.getcwd()
+
+ ## copy result from modelarts to obs
+ obs_result_dir = os.path.join(FLAGS.obs_dir, 'result')
+ if not mox.file.exists(obs_result_dir):
+ mox.file.make_dirs(obs_result_dir)
+ mox.file.copy_parallel(src_url=FLAGS.result, dst_url=obs_result_dir)
+ print("===>>>Copy Event or Checkpoint from modelarts dir:{} to obs:{}".format(FLAGS.result, obs_result_dir))
+
+ ## Copy auto tune repository. Comment this snippets if npu_auto_tune is off.
+ if FLAGS.npu_auto_tune:
+ modelarts_auto_tune_dir = os.path.join(work_dir, "npu_auto_tune")
+ obs_auto_tune_dir = os.path.join(FLAGS.obs_dir, 'npu_auto_tune')
+ if not mox.file.exists(obs_auto_tune_dir):
+ mox.file.make_dirs(obs_auto_tune_dir)
+ mox.file.copy_parallel(modelarts_auto_tune_dir, obs_auto_tune_dir)
+ print("===>>>Auto tune:{} on OBS dir:{}".format(mox.file.list_directory(obs_auto_tune_dir), obs_auto_tune_dir))
+
+ ## Copy dump data. Comment this snippets if npu_dump_data is off.
+ if FLAGS.npu_dump_data:
+ modelarts_dump_data_dir = os.path.join(work_dir, "npu_dump_data")
+ obs_dump_data_dir = os.path.join(FLAGS.obs_dir, 'npu_dump_data')
+ if not mox.file.exists(obs_dump_data_dir):
+ mox.file.make_dirs(obs_dump_data_dir)
+ mox.file.copy_parallel(modelarts_dump_data_dir, obs_dump_data_dir)
+        print("===>>>Dumped data:{} on OBS dir:{}".format(mox.file.list_directory(obs_dump_data_dir), obs_dump_data_dir))
+
+ ## Copy compute graph. Comment this snippets if npu_dump_graph is off.
+ if FLAGS.npu_dump_graph:
+ modelarts_dump_graph_dir = os.path.join(work_dir, "npu_dump_graph")
+ obs_dump_graph_dir = os.path.join(FLAGS.obs_dir, 'npu_dump_graph')
+ if not mox.file.exists(obs_dump_graph_dir):
+ mox.file.make_dirs(obs_dump_graph_dir)
+ mox.file.copy_parallel(modelarts_dump_graph_dir, obs_dump_graph_dir)
+        print("===>>>Dumped graph:{} on OBS dir:{}".format(mox.file.list_directory(obs_dump_graph_dir), obs_dump_graph_dir))
+
+ ## Copy profiling data. Comment this snippets if npu_profiling is off.
+ if FLAGS.npu_profiling:
+ modelarts_profiling_dir = os.path.join(work_dir, "npu_profiling")
+ obs_profiling_dir = os.path.join(FLAGS.obs_dir, 'npu_profiling')
+ if not mox.file.exists(obs_profiling_dir):
+ mox.file.make_dirs(obs_profiling_dir)
+ mox.file.copy_parallel(modelarts_profiling_dir, obs_profiling_dir)
+ print("===>>>Profiling data:{} on OBS dir:{}".format(mox.file.list_directory(obs_profiling_dir), obs_profiling_dir))
\ No newline at end of file
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/losses.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..fda1c33e298ded76b95c3325a915bb9af1c764fe
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/losses.py
@@ -0,0 +1,145 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import tensorflow as tf
+
+
+def _pdist(a, b=None):
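+    # Pairwise squared Euclidean distances via the expansion
+    # ||a_i - b_j||^2 = ||a_i||^2 + ||b_j||^2 - 2 * a_i . b_j,
+    # computed for all pairs with a single matrix multiplication.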
+ sq_sum_a = tf.reduce_sum(tf.square(a), reduction_indices=[1])
+ if b is None:
+ return -2 * tf.matmul(a, tf.transpose(a)) + \
+ tf.reshape(sq_sum_a, (-1, 1)) + tf.reshape(sq_sum_a, (1, -1))
+ sq_sum_b = tf.reduce_sum(tf.square(b), reduction_indices=[1])
+ return -2 * tf.matmul(a, tf.transpose(b)) + \
+ tf.reshape(sq_sum_a, (-1, 1)) + tf.reshape(sq_sum_b, (1, -1))
+
+
+def softmargin_triplet_loss(features, labels, create_summaries=True):
+ """Softmargin triplet loss.
+
+ See::
+
+ Hermans, Beyer, Leibe: In Defense of the Triplet Loss for Person
+ Re-Identification. arXiv, 2017.
+
+ Parameters
+ ----------
+ features : tf.Tensor
+ A matrix of shape NxM that contains the M-dimensional feature vectors
+ of N objects (floating type).
+ labels : tf.Tensor
+ The one-dimensional array of length N that contains for each feature
+ the associated class label (integer type).
+ create_summaries : Optional[bool]
+ If True, creates summaries to monitor training behavior.
+
+ Returns
+ -------
+ tf.Tensor
+ A scalar loss tensor.
+
+ """
+ eps = tf.constant(1e-5, tf.float32)
+ nil = tf.constant(0., tf.float32)
+ almost_inf = tf.constant(1e+10, tf.float32)
+
+ squared_distance_mat = _pdist(features)
+ distance_mat = tf.sqrt(tf.maximum(nil, eps + squared_distance_mat))
+ label_mat = tf.cast(tf.equal(
+ tf.reshape(labels, (-1, 1)), tf.reshape(labels, (1, -1))), tf.float32)
+
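+    # Batch-hard mining: for each anchor, take the farthest example with the
+    # same label as the positive and the closest example with a different
+    # label as the negative (the almost_inf term masks same-label entries).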
+ positive_distance = tf.reduce_max(label_mat * distance_mat, axis=1)
+ negative_distance = tf.reduce_min(
+ (label_mat * almost_inf) + distance_mat, axis=1)
+ loss = tf.nn.softplus(positive_distance - negative_distance)
+ if create_summaries:
+ fraction_invalid_pdist = tf.reduce_mean(
+ tf.cast(tf.less_equal(squared_distance_mat, -eps), tf.float32))
+ tf.summary.scalar("fraction_invalid_pdist", fraction_invalid_pdist)
+
+ fraction_active_triplets = tf.reduce_mean(
+ tf.cast(tf.greater_equal(loss, 1e-5), tf.float32))
+ tf.summary.scalar("fraction_active_triplets", fraction_active_triplets)
+
+ embedding_squared_norm = tf.reduce_mean(
+ tf.reduce_sum(tf.square(features), axis=1))
+ tf.summary.scalar("mean squared feature norm", embedding_squared_norm)
+
+ mean_distance = tf.reduce_mean(distance_mat)
+ tf.summary.scalar("mean feature distance", mean_distance)
+
+ mean_positive_distance = tf.reduce_mean(positive_distance)
+ tf.summary.scalar("mean positive distance", mean_positive_distance)
+
+ mean_negative_distance = tf.reduce_mean(negative_distance)
+ tf.summary.scalar("mean negative distance", mean_negative_distance)
+
+ return tf.reduce_mean(loss)
+
+
+def magnet_loss(features, labels, margin=1.0, unique_labels=None):
+ """Simple unimodal magnet loss.
+
+ See::
+
+ Rippel, Paluri, Dollar, Bourdev: Metric Learning With Adaptive
+ Density Discrimination. ICLR, 2016.
+
+ Parameters
+ ----------
+ features : tf.Tensor
+ A matrix of shape NxM that contains the M-dimensional feature vectors
+ of N objects (floating type).
+ labels : tf.Tensor
+ The one-dimensional array of length N that contains for each feature
+ the associated class label (integer type).
+ margin : float
+ A scalar margin hyperparameter.
+ unique_labels : Optional[tf.Tensor]
+ Optional tensor of unique values in `labels`. If None given, computed
+ from data.
+
+ Returns
+ -------
+ tf.Tensor
+ A scalar loss tensor.
+
+ """
+ nil = tf.constant(0., tf.float32)
+ one = tf.constant(1., tf.float32)
+ minus_two = tf.constant(-2., tf.float32)
+ eps = tf.constant(1e-4, tf.float32)
+ margin = tf.constant(margin, tf.float32)
+
+ num_per_class = None
+ if unique_labels is None:
+ unique_labels, sample_to_unique_y, num_per_class = tf.unique_with_counts(labels)
+ num_per_class = tf.cast(num_per_class, tf.float32)
+
+ y_mat = tf.cast(tf.equal(
+ tf.reshape(labels, (-1, 1)), tf.reshape(unique_labels, (1, -1))),
+ dtype=tf.float32)
+
+    # If the per-class counts were not obtained above, compute them from the batch.
+ if num_per_class is None:
+ num_per_class = tf.reduce_sum(y_mat, reduction_indices=[0])
+ class_means = tf.reduce_sum(
+ tf.expand_dims(tf.transpose(y_mat), -1) * tf.expand_dims(features, 0),
+ reduction_indices=[1]) / tf.expand_dims(num_per_class, -1)
+
+ squared_distance = _pdist(features, class_means)
+
+ num_samples = tf.cast(tf.shape(labels)[0], tf.float32)
+ variance = tf.reduce_sum(
+ y_mat * squared_distance) / (num_samples - one)
+
+ const = one / (minus_two * (variance + eps))
+ linear = const * squared_distance - y_mat * margin
+
+ maxi = tf.reduce_max(linear, reduction_indices=[1], keepdims=True)
+ loss_mat = tf.exp(linear - maxi)
+
+ a = tf.reduce_sum(y_mat * loss_mat, reduction_indices=[1])
+ b = tf.reduce_sum((one - y_mat) * loss_mat, reduction_indices=[1])
+ loss = tf.maximum(nil, -tf.log(eps + a / (eps + b)))
+ return tf.reduce_mean(loss), class_means, variance
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/metrics.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..568d378b6567ec6060437b705ec2367f34b2022e
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/metrics.py
@@ -0,0 +1,223 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+
+
+def pdist(a, b=None):
+ """Compute element-wise squared distance between `a` and `b`.
+
+ Parameters
+ ----------
+ a : tf.Tensor
+ A matrix of shape NxL with N row-vectors of dimensionality L.
+ b : tf.Tensor
+ A matrix of shape MxL with M row-vectors of dimensionality L.
+
+ Returns
+ -------
+ tf.Tensor
+ A matrix of shape NxM where element (i, j) contains the squared
+ distance between elements `a[i]` and `b[j]`.
+
+ """
+ sq_sum_a = tf.reduce_sum(tf.square(a), reduction_indices=[1])
+ if b is None:
+ return -2 * tf.matmul(a, tf.transpose(a)) + \
+ tf.reshape(sq_sum_a, (-1, 1)) + tf.reshape(sq_sum_a, (1, -1))
+ sq_sum_b = tf.reduce_sum(tf.square(b), reduction_indices=[1])
+ return -2 * tf.matmul(a, tf.transpose(b)) + \
+ tf.reshape(sq_sum_a, (-1, 1)) + tf.reshape(sq_sum_b, (1, -1))
+
+
+def cosine_distance(a, b=None):
+ """Compute element-wise cosine distance between `a` and `b`.
+
+ Parameters
+ ----------
+ a : tf.Tensor
+ A matrix of shape NxL with N row-vectors of dimensionality L.
+ b : tf.Tensor
+ A matrix of shape NxL with N row-vectors of dimensionality L.
+
+ Returns
+ -------
+ tf.Tensor
+ A matrix of shape NxM where element (i, j) contains the cosine distance
+ between elements `a[i]` and `b[j]`.
+
+ """
+ a_normed = tf.nn.l2_normalize(a, dim=1)
+ b_normed = a_normed if b is None else tf.nn.l2_normalize(b, dim=1)
+ return (
+ tf.constant(1.0, tf.float32) -
+ tf.matmul(a_normed, tf.transpose(b_normed)))
+
+
+def recognition_rate_at_k(probe_x, probe_y, gallery_x, gallery_y, k,
+ measure=pdist):
+ """Compute the recognition rate at a given level `k`.
+
+    For a given probe and a gallery ranked by increasing distance under the
+    measure `measure` (i.e., by decreasing similarity), the recognition rate
+    at `k` is::
+
+        recognition_rate_at_k = num_correct / min(k, num_relevant)
+
+    where `num_correct` is the number of images in the top k entries of the
+    ranked gallery that have the same label as the probe and `num_relevant`
+    is the total number of gallery elements that have the same label.
+
+ Parameters
+ ----------
+ probe_x: tf.Tensor
+ A tensor of probe images.
+ probe_y: tf.Tensor
+ A tensor of probe labels.
+ gallery_x: tf.Tensor
+ A tensor of gallery images.
+ gallery_y: tf.Tensor
+ A tensor of gallery labels.
+ k: int
+ See description above.
+ measure: Callable[tf.Tensor, tf.Tensor] -> tf.Tensor
+ A callable that computes for two matrices of row-vectors a matrix of
+ element-wise distances. See `pdist` for an example.
+
+ Returns
+ -------
+ tf.Tensor
+ Returns a scalar tensor which represents the computed metric.
+
+ """
+ # Build a matrix of shape (num_probes, num_gallery_images) where element
+ # (i, j) is 1 if probe image i and the gallery image j have the same
+ # identity, otherwise 0.
+ label_eq_mat = tf.cast(tf.equal(tf.reshape(
+ probe_y, (-1, 1)), tf.reshape(gallery_y, (1, -1))),
+ tf.float32)
+
+ # For each probe image, compute the number of relevant images in the
+ # gallery (same identity). This should always be one for CMC evaluation
+ # because we always have exactly one probe and one gallery image for each
+ # identity.
+ num_relevant = tf.minimum(tf.cast(k, tf.float32), tf.reduce_sum(
+ label_eq_mat, reduction_indices=[1]))
+
+ # Rank gallery images by the similarity measure to build a matrix of
+ # shape (num_probes, k) where element (i, j) contains the label of the
+ # j-th ranked gallery image for probe i.
+ predictions = tf.exp(-measure(probe_x, gallery_x)) # Compute similarity.
+ _, prediction_indices = tf.nn.top_k(predictions, k=k)
+ label_mat = tf.gather(gallery_y, prediction_indices)
+
+ # Just as we have done before, build a matrix where element (i, j) is
+ # one if probe i and gallery image j share the same label (same identity).
+ # This time, the matrix is ranked by the similarity measure and we only
+ # keep the top-k predictions.
+ label_eq_mat = tf.cast(tf.equal(
+ label_mat, tf.reshape(probe_y, (-1, 1))), tf.float32)
+
+ # Compute the number of true positives in [0, k[, i.e., check if we find
+ # the correct gallery image within the top-k ranked results. Then, compute
+ # the recognition rate, which in our case is either 0 or 1 since we have
+ # only one gallery image that shares the same identity with the probe.
+ #
+ # This is the final output of our CMC metric.
+ true_positives_at_k = tf.reduce_sum(label_eq_mat, reduction_indices=[1])
+ return true_positives_at_k / num_relevant
+
+
+def streaming_mean_cmc_at_k(probe_x, probe_y, gallery_x, gallery_y, k,
+ measure=pdist):
+ """Compute cumulated matching characteristics (CMC) at level `k` over
+ a stream of data (i.e., multiple batches).
+
+ The function is compatible with TensorFlow-Slim's streaming metrics
+ interface, e.g., `slim.metrics.aggregate_metric_map`.
+
+ Parameters
+ ----------
+ probe_x: tf.Tensor
+ A tensor of probe images.
+ probe_y: tf.Tensor
+ A tensor of probe labels.
+ gallery_x: tf.Tensor
+ A tensor of gallery images.
+ gallery_y: tf.Tensor
+ A tensor of gallery labels.
+ k: int
+ See description above.
+ measure: Callable[tf.Tensor, tf.Tensor] -> tf.Tensor
+ A callable that computes for two matrices of row-vectors a matrix of
+ element-wise distances. See `pdist` for an example.
+
+ Returns
+ -------
+ Tuple[tf.Tensor, tf.Tensor]
+ The first element in the tuple is the current result. The second element
+ is an operation that updates the computed metric based on new data.
+
+ """
+ recognition_rate = recognition_rate_at_k(
+ probe_x, probe_y, gallery_x, gallery_y, k, measure)
+ return slim.metrics.streaming_mean(recognition_rate)
+
+
+def streaming_mean_averge_precision(probe_x, probe_y, gallery_x, gallery_y,
+ good_mask, measure=pdist):
+ """Compute mean average precision (mAP) over a stream of data.
+
+ Parameters
+ ----------
+ probe_x: tf.Tensor
+ A tensor of N probe images.
+ probe_y: tf.Tensor
+ A tensor of N probe labels.
+ gallery_x: tf.Tensor
+ A tensor of M gallery images.
+ gallery_y: tf.Tensor
+ A tensor of M gallery labels.
+ good_mask: Optional[tf.Tensor]
+ A matrix of shape NxM where element (i, j) evaluates to 0.0 if the pair
+ of i-th probe and j-th gallery image should be excluded from metric
+ computation. All other elements should evaluate to 1.0.
+ measure: Callable[tf.Tensor, tf.Tensor] -> tf.Tensor
+ A callable that computes for two matrices of row-vectors a matrix of
+ element-wise distances. See `pdist` for an example.
+
+ Returns
+ -------
+ Tuple[tf.Tensor, tf.Tensor]
+ The first element in the tuple is the current result. The second element
+ is an operation that updates the computed metric based on new data.
+
+ """
+ # See Wikipedia:
+ # https://en.wikipedia.org/wiki/Information_retrieval#Average_precision
+ if good_mask.dtype != tf.float32:
+ good_mask = tf.cast(good_mask, tf.float32)
+
+    # Compute the similarity measure and mask out excluded probe/gallery pairs.
+ predictions = good_mask * tf.exp(-measure(probe_x, gallery_x))
+
+ # Compute matrix of predicted labels.
+ k = tf.shape(gallery_y)[0]
+ _, prediction_indices = tf.nn.top_k(predictions, k=k)
+ predicted_label_mat = tf.gather(gallery_y, prediction_indices)
+ label_eq_mat = tf.cast(tf.equal(
+ predicted_label_mat, tf.reshape(probe_y, (-1, 1))), tf.float32)
+
+ # Compute statistics.
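+    # Average precision per probe: the mean of precision@k evaluated at the
+    # ranks where a relevant (same-identity) gallery image is retrieved.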
+ num_relevant = tf.reduce_sum(
+ good_mask * label_eq_mat, reduction_indices=[1], keep_dims=True)
+ true_positives_at_k = tf.cumsum(label_eq_mat, axis=1)
+ retrieved_at_k = tf.cumsum(tf.ones_like(label_eq_mat), axis=1)
+ precision_at_k = true_positives_at_k / retrieved_at_k
+ relevant_at_k = label_eq_mat
+ average_precision = (
+ tf.reduce_sum(precision_at_k * relevant_at_k, reduction_indices=[1]) /
+ tf.cast(tf.squeeze(num_relevant), tf.float32))
+
+ return slim.metrics.streaming_mean(average_precision)
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/__init__.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/.keep b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/__init__.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/network_definition.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/network_definition.py
new file mode 100644
index 0000000000000000000000000000000000000000..005da31b60354eb5eb86bf95697fa8acd42f8be4
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/network_definition.py
@@ -0,0 +1,134 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+
+from . import residual_net
+
+
+def create_network(images, num_classes=None, add_logits=True, reuse=None,
+ create_summaries=True, weight_decay=1e-8):
+ nonlinearity = tf.nn.elu
+ conv_weight_init = tf.truncated_normal_initializer(stddev=1e-3)
+ conv_bias_init = tf.zeros_initializer()
+ conv_regularizer = slim.l2_regularizer(weight_decay)
+ fc_weight_init = tf.truncated_normal_initializer(stddev=1e-3)
+ fc_bias_init = tf.zeros_initializer()
+ fc_regularizer = slim.l2_regularizer(weight_decay)
+
+ def batch_norm_fn(x):
+ return slim.batch_norm(x, scope=tf.get_variable_scope().name + "/bn")
+
+ network = images
+ network = slim.conv2d(
+ network, 32, [3, 3], stride=1, activation_fn=nonlinearity,
+ padding="SAME", normalizer_fn=batch_norm_fn, scope="conv1_1",
+ weights_initializer=conv_weight_init, biases_initializer=conv_bias_init,
+ weights_regularizer=conv_regularizer)
+ if create_summaries:
+ tf.summary.histogram(network.name + "/activations", network)
+ tf.summary.image("conv1_1/weights", tf.transpose(
+ slim.get_variables("conv1_1/weights:0")[0], [3, 0, 1, 2]),
+ max_outputs=128)
+ network = slim.conv2d(
+ network, 32, [3, 3], stride=1, activation_fn=nonlinearity,
+ padding="SAME", normalizer_fn=batch_norm_fn, scope="conv1_2",
+ weights_initializer=conv_weight_init, biases_initializer=conv_bias_init,
+ weights_regularizer=conv_regularizer)
+ if create_summaries:
+ tf.summary.histogram(network.name + "/activations", network)
+
+ network = slim.max_pool2d(
+ network, [3, 3], [2, 2], scope="pool1", padding="SAME")
+
+ network = residual_net.residual_block(
+ network, "conv2_1", nonlinearity, conv_weight_init, conv_bias_init,
+ conv_regularizer, increase_dim=False, is_first=True,
+ summarize_activations=create_summaries)
+ network = residual_net.residual_block(
+ network, "conv2_3", nonlinearity, conv_weight_init, conv_bias_init,
+ conv_regularizer, increase_dim=False,
+ summarize_activations=create_summaries)
+
+ network = residual_net.residual_block(
+ network, "conv3_1", nonlinearity, conv_weight_init, conv_bias_init,
+ conv_regularizer, increase_dim=True,
+ summarize_activations=create_summaries)
+ network = residual_net.residual_block(
+ network, "conv3_3", nonlinearity, conv_weight_init, conv_bias_init,
+ conv_regularizer, increase_dim=False,
+ summarize_activations=create_summaries)
+
+ network = residual_net.residual_block(
+ network, "conv4_1", nonlinearity, conv_weight_init, conv_bias_init,
+ conv_regularizer, increase_dim=True,
+ summarize_activations=create_summaries)
+ network = residual_net.residual_block(
+ network, "conv4_3", nonlinearity, conv_weight_init, conv_bias_init,
+ conv_regularizer, increase_dim=False,
+ summarize_activations=create_summaries)
+
+ feature_dim = network.get_shape().as_list()[-1]
+ print("feature dimensionality: ", feature_dim)
+ network = slim.flatten(network)
+
+ network = slim.dropout(network, keep_prob=0.6)
+ network = slim.fully_connected(
+ network, feature_dim, activation_fn=nonlinearity,
+ normalizer_fn=batch_norm_fn, weights_regularizer=fc_regularizer,
+ scope="fc1", weights_initializer=fc_weight_init,
+ biases_initializer=fc_bias_init)
+
+ features = network
+
+ # Features in rows, normalize axis 1.
+ features = tf.nn.l2_normalize(features, dim=1)
+
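+    # Cosine-softmax classifier: the logits are cosine similarities between
+    # the L2-normalized features and L2-normalized per-class mean vectors,
+    # scaled by a learned softplus temperature.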
+ if add_logits:
+ with slim.variable_scope.variable_scope("ball", reuse=reuse):
+ weights = slim.model_variable(
+ "mean_vectors", (feature_dim, int(num_classes)),
+ initializer=tf.truncated_normal_initializer(stddev=1e-3),
+ regularizer=None)
+ scale = slim.model_variable(
+ "scale", (), tf.float32,
+ initializer=tf.constant_initializer(0., tf.float32),
+ regularizer=slim.l2_regularizer(1e-1))
+ if create_summaries:
+ tf.summary.scalar("scale", scale)
+ scale = tf.nn.softplus(scale)
+
+            # Mean vectors in columns, normalize axis 0.
+ weights_normed = tf.nn.l2_normalize(weights, dim=0)
+ logits = scale * tf.matmul(features, weights_normed)
+ else:
+ logits = None
+ return features, logits
+
+
+def create_network_factory(is_training, num_classes, add_logits,
+ weight_decay=1e-8, reuse=None):
+
+ def factory_fn(image):
+ with slim.arg_scope([slim.batch_norm, slim.dropout],
+ is_training=is_training):
+ with slim.arg_scope([slim.conv2d, slim.fully_connected,
+ slim.batch_norm, slim.layer_norm],
+ reuse=reuse):
+ features, logits = create_network(
+ image, num_classes=num_classes, add_logits=add_logits,
+ reuse=reuse, create_summaries=is_training,
+ weight_decay=weight_decay)
+ return features, logits
+
+ return factory_fn
+
+
+def preprocess(image, is_training=False, input_is_bgr=False):
+ if input_is_bgr:
+ image = image[:, :, ::-1] # BGR to RGB
+ image = tf.divide(tf.cast(image, tf.float32), 255.0)
+ if is_training:
+ image = tf.image.random_flip_left_right(image)
+ return image
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/residual_net.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/residual_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c22fb5edb0cb16d3ad69391d58829587c36a5e3
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/nets/deep_sort/residual_net.py
@@ -0,0 +1,86 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import tensorflow as tf
+from tensorflow.contrib import slim
+
+
+def _batch_norm_fn(x, scope=None):
+ if scope is None:
+ scope = tf.get_variable_scope().name + "/bn"
+ return slim.batch_norm(x, scope=scope)
+
+
+def create_link(
+ incoming, network_builder, scope, nonlinearity=tf.nn.elu,
+ weights_initializer=tf.truncated_normal_initializer(stddev=1e-3),
+ regularizer=None, is_first=False, summarize_activations=True):
+ if is_first:
+ network = incoming
+ else:
+ network = _batch_norm_fn(incoming, scope=scope + "/bn")
+ network = nonlinearity(network)
+ if summarize_activations:
+ tf.summary.histogram(scope+"/activations", network)
+
+ pre_block_network = incoming
+ post_block_network = network_builder(network, scope)
+
+ incoming_dim = pre_block_network.get_shape().as_list()[-1]
+ outgoing_dim = post_block_network.get_shape().as_list()[-1]
+ if incoming_dim != outgoing_dim:
+ assert outgoing_dim == 2 * incoming_dim, \
+            "%d != %d" % (outgoing_dim, 2 * incoming_dim)
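+        # Project the shortcut with a strided 1x1 convolution so that its
+        # spatial size and channel count match the block output.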
+ projection = slim.conv2d(
+ incoming, outgoing_dim, 1, 2, padding="SAME", activation_fn=None,
+ scope=scope+"/projection", weights_initializer=weights_initializer,
+ biases_initializer=None, weights_regularizer=regularizer)
+ network = projection + post_block_network
+ else:
+ network = incoming + post_block_network
+ return network
+
+
+def create_inner_block(
+ incoming, scope, nonlinearity=tf.nn.elu,
+ weights_initializer=tf.truncated_normal_initializer(1e-3),
+ bias_initializer=tf.zeros_initializer(), regularizer=None,
+ increase_dim=False, summarize_activations=True):
+ n = incoming.get_shape().as_list()[-1]
+ stride = 1
+ if increase_dim:
+ n *= 2
+ stride = 2
+
+ incoming = slim.conv2d(
+ incoming, n, [3, 3], stride, activation_fn=nonlinearity, padding="SAME",
+ normalizer_fn=_batch_norm_fn, weights_initializer=weights_initializer,
+ biases_initializer=bias_initializer, weights_regularizer=regularizer,
+ scope=scope + "/1")
+ if summarize_activations:
+ tf.summary.histogram(incoming.name + "/activations", incoming)
+
+ incoming = slim.dropout(incoming, keep_prob=0.6)
+
+ incoming = slim.conv2d(
+ incoming, n, [3, 3], 1, activation_fn=None, padding="SAME",
+ normalizer_fn=None, weights_initializer=weights_initializer,
+ biases_initializer=bias_initializer, weights_regularizer=regularizer,
+ scope=scope + "/2")
+ return incoming
+
+
+def residual_block(incoming, scope, nonlinearity=tf.nn.elu,
+                   weights_initializer=tf.truncated_normal_initializer(1e-3),
+ bias_initializer=tf.zeros_initializer(), regularizer=None,
+ increase_dim=False, is_first=False,
+ summarize_activations=True):
+
+ def network_builder(x, s):
+ return create_inner_block(
+ x, s, nonlinearity, weights_initializer, bias_initializer,
+ regularizer, increase_dim, summarize_activations)
+
+ return create_link(
+ incoming, network_builder, scope, nonlinearity, weights_initializer,
+ regularizer, is_first, summarize_activations)
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/queued_trainer.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/queued_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..8939fbe28159e1fcbadfcc68d0033d36d2362dc1
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/queued_trainer.py
@@ -0,0 +1,616 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import string
+import os
+import threading
+import numpy as np
+
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+import time
+
+config = tf.ConfigProto()
+custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = "NpuOptimizer"
+custom_op.parameter_map["use_off_line"].b = True
+custom_op.parameter_map["mix_compile_mode"].b = True
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("force_fp32")  # allow_mix_precision
+#custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes("/home/test/ops_info.json")
+config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
+
+def run_in_batches(f, data_dict, out, batch_size):
+ """Process data in batches.
+
+ Parameters
+ ----------
+    f : Callable[Dict[tf.Tensor, np.ndarray]] -> np.ndarray
+        A function that maps a given input (one or multiple input arrays) to a
+        single output array.
+ data_dict : Dict[tf.Tensor, np.ndarray]
+ Maps from symbolic input tensor to numpy data array.
+ out : np.ndarray
+        The computed function output will be stored in this array; it must
+        have a shape and length compatible with the output computed by `f`.
+ batch_size : int
+ The number of samples to compute in each call to `f`. If the length of
+ the input array is not divisible by the batch size, the final call to
+ `f` contains fewer examples.
+
+ """
+ data_len = len(out)
+ num_batches = int(data_len / batch_size)
+
+ def pad(x):
+ x = np.asarray(x)
+ y = np.full((batch_size, ) + x.shape[1:], x[0], dtype=x.dtype)
+ y[:x.shape[0]] = x
+ return y
+
+ s, e = 0, batch_size
+ for i in range(num_batches):
+ s, e = i * batch_size, (i + 1) * batch_size
+ batch_data_dict = {k: v[s:e] for k, v in data_dict.items()}
+ out[s:e] = f(batch_data_dict)
+ if e < len(out):
+ remaining_len = len(out) - e
+ batch_data_dict = {k: pad(v[e:]) for k, v in data_dict.items()}
+ out[e:] = f(batch_data_dict)[:remaining_len]
+
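+# Usage sketch (assumed names: session `sess`, input placeholder `image_var`,
+# feature op `feature_var`, image array `images`, and integer `feature_dim`):
+# encode a whole dataset in fixed-size batches:
+#
+#     features = np.zeros((len(images), feature_dim), np.float32)
+#     run_in_batches(
+#         lambda fd: sess.run(feature_var, feed_dict=fd),
+#         {image_var: images}, features, batch_size=32)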
+
+def iterate_forever(batch_size, data, *other_data):
+    """Iterate over the dataset indefinitely.
+
+ Parameters
+ ----------
+ batch_size : int
+ The batch size.
+ data : ndarray
+ The first input array.
+ other_data
+ Additional input arrays; must be of type np.ndarray.
+
+ Returns
+ -------
+ List[np.ndarray]
+ A dataset batch. The length of each entry in the list is `batch_size`.
+
+ """
+ data_len = len(data)
+ num_batches = int(data_len / batch_size)
+
+ while True:
+ data_list = [data] + list(other_data)
+ s, e = 0, 0
+ for i in range(num_batches):
+ s, e = i * batch_size, (i + 1) * batch_size
+ batch = [x[s:e] for x in data_list]
+ yield batch[0] if len(batch) == 1 else batch
+ if e < data_len:
+ batch = [x[e:] for x in data_list]
+ yield batch[0] if len(batch) == 1 else batch
+
+
+def random_shuffle_forever(batch_size, data, *other_data):
+ """A generator that randomly selects `batch_size` entries from the data.
+
+ Parameters
+ ----------
+ batch_size : int
+ The batch size.
+ data : np.ndarray
+ The first input array.
+ other_data
+ Additional input arrays; must be of type np.ndarray
+
+ Returns
+ -------
+ List[np.ndarray]
+ A batch of randomly selected entries. The length of each entry in the
+ list is `batch_size`.
+
+ """
+ data_list = [data] + list(other_data)
+ indices = np.arange(len(data))
+ while True:
+ batch_indices = np.random.choice(indices, batch_size, replace=False)
+ batch = [x[batch_indices] for x in data_list]
+ yield batch[0] if len(batch) == 1 else batch
+
+
+def random_sample_identities_forever(batch_size, num_samples_per_id, data_x,
+ data_y, num_fa_images=0):
+ """A generator that randomly selects a fixed number of entries per label.
+
+ If false alarms are passed into this function, they should have a negative
+ label, i.e., `data_y[i] < 0` if the i-th example corresponds to a false
+ alarm.
+
+ Parameters
+ ----------
+ batch_size : int
+ The batch size.
+ num_samples_per_id : int
+ Number of examples per label in each batch. If the `batch_size` is not
+ divisible by `num_samples_per_id` then the batch is filled with false
+ alarms. A warning is printed if no false alarms are available to fill
+ up the batch.
+ data_x : List[string] | np.ndarray
+ The data array; either a list of filenames or a tensor of input images.
+ data_y : List[int] | np.ndarray
+        The label array (either a list or a one-dimensional numpy array).
+ num_fa_images : Optional[int]
+ Number of false alarm images to include in each batch; defaults to zero.
+
+ Returns
+ -------
+ List[np.ndarray]
+ Returns a list of length two where the first entry is the data array
+ corresponding to `data_x` and the second entry is the label array
+ corresponding to `data_y`. The elements in the list are of length
+ `batch_size`.
+
+ """
+ assert (batch_size - num_fa_images) % num_samples_per_id == 0
+ num_ids_per_batch = int((batch_size - num_fa_images) / num_samples_per_id)
+
+ data_x = np.asarray(data_x)
+ data_y = np.asarray(data_y)
+
+ unique_y = np.unique(data_y[data_y >= 0])
+ y_to_idx = {y: np.where(data_y == y)[0] for y in unique_y}
+ fa_indices = np.where(data_y < 0)[0]
+
+ while True:
+ # Draw the desired number of identities.
+ indices = np.random.choice(
+ len(unique_y), num_ids_per_batch, replace=False)
+ batch_unique_y = unique_y[indices]
+
+ batch_x = np.zeros((batch_size, ) + data_x.shape[1:], data_x.dtype)
+ batch_y = np.zeros((batch_size, ), data_y.dtype)
+ e = 0
+ for i, y in enumerate(batch_unique_y):
+ num_samples = min(num_samples_per_id, len(y_to_idx[y]))
+ indices = np.random.choice(y_to_idx[y], num_samples, replace=False)
+ s, e = e, e + num_samples
+ batch_x[s:e] = data_x[indices]
+ batch_y[s:e] = y
+
+ # Fill up remaining space with false alarms.
+ num_samples = len(batch_x) - e
+ if num_fa_images > 0:
+ num_batch_fa_samples = min(num_samples, len(fa_indices))
+ indices = np.random.choice(
+ fa_indices, num_batch_fa_samples, replace=False)
+ s, e = e, e + num_batch_fa_samples
+ batch_x[s:e] = data_x[indices]
+ batch_y[s:e] = data_y[indices]
+
+ # If we need to add more data, random sample ids until we have reached
+ # the batch size.
+ num_samples = len(batch_x) - e
+ num_tries = 0
+ while num_samples > 0 and num_tries < 100:
+ y = np.random.choice(unique_y)
+ if y in batch_unique_y:
+ # Find a target that we have not yet in this batch.
+ num_tries += 1
+ continue
+
+ num_samples = min(num_samples, len(y_to_idx[y]))
+ indices = np.random.choice(y_to_idx[y], num_samples, replace=False)
+ s, e = e, e + num_samples
+ batch_x[s:e] = data_x[indices]
+ batch_y[s:e] = y
+ num_samples = len(batch_x) - e
+
+ if e < batch_size:
+ print("ERROR: Failed to sample a full batch. Adding corrupt data.")
+ yield [batch_x, batch_y]
+
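+# Usage sketch (assumed arrays `train_x`, `train_y`): with batch_size=128 and
+# num_samples_per_id=4, every yielded batch holds 32 identities with up to
+# four images each, a sampling scheme suited to the triplet and magnet losses:
+#
+#     batches = random_sample_identities_forever(128, 4, train_x, train_y)
+#     batch_x, batch_y = next(batches)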
+
+def _truncate_dataset_to_batch_size(batch_size, data, *other_data):
+ """Truncate given input data to a multiple of the batch size.
+
+ Parameters
+ ----------
+ batch_size : int
+ The batch size. The length of the truncated data is a multiple of this
+ value.
+ data : np.ndarray
+ The first input array.
+ *other_data
+ Additional input arrays; must be of type np.ndarray.
+
+ Returns
+ -------
+ List[np.ndarray]
+ The truncated data. The length of each entry in the list is a multiple
+ of the batch size.
+
+ """
+ num_batches = int(len(data) / batch_size)
+ new_len = num_batches * batch_size
+ dataset = [data] + list(other_data)
+ if new_len < len(data):
+ print(
+ "WARNING dataset length is not a multiple of batch size. "
+ "Truncating from %d to %d." % (len(data), new_len))
+ dataset = [x[:new_len] for x in dataset]
+ return num_batches, dataset[0] if len(dataset) == 1 else dataset
+
+
+def _generate_run_id(size=6, chars=None):
+ """Generate a random ID of length `size`.
+
+ Parameters
+ ----------
+ size : int
+ chars : Optional[str]
+ Optional list of characters to use for generating the ID.
+
+ Returns
+ -------
+ str
+ Returns a random identifier of length `size`.
+
+ """
+ if chars is None:
+ chars = string.ascii_uppercase + string.digits
+ import random
+ return ''.join(random.choice(chars) for _ in range(size))
+
+
+class ThreadSafeIterator(object):
+ """
+ This class wraps an iterator (or generator) such that only one thread at a
+ time is granted access.
+
+ Parameters
+ ----------
+ iterator_or_generator
+ An iterator or generator to be wrapped.
+
+ """
+
+ def __init__(self, iterator_or_generator):
+ self._iterator_or_generator = iterator_or_generator
+ self._lock = threading.Lock()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ with self._lock:
+ return next(self._iterator_or_generator)
+
+ def next(self):
+ with self._lock:
+ return self._iterator_or_generator.next()
+
+
+class QueuedTrainer(object):
+ """
+ This class implements code to train and evaluate TensorFlow models based on
+    TensorFlow-Slim. Image loading and preprocessing are decoupled from the
+ training steps using a tf.FIFOQueue.
+
+ Parameters
+ ----------
+ enqueue_vars : List[tf.Tensor]
+ A list of tensors to be enqueued; usually the labels and preprocessed
+ images.
+ input_vars : Optional[List[tf.Tensor]]
+ An optional list of input tensors; usually the labels and raw (not
+ preprocessed) images or filenames to the images. The list must be of the
+ same length as the `enqueue_vars` and there must be a one-to-one
+        correspondence, i.e., the i-th element in `enqueue_vars` is the
+        preprocessed version of the i-th element in `input_vars`. If None,
+        `input_vars` is set to `enqueue_vars`.
+ num_enqueue_threads : Optional[int]
+ Number of threads used to preprocess data in parallel.
+ queue_capacity : Optional[int]
+ Maximum number of elements in the queue; defaults to 512.
+
+ """
+
+ def __init__(self, enqueue_vars, input_vars=None, num_enqueue_threads=4,
+ queue_capacity=512):
+ if input_vars is None:
+ input_vars = enqueue_vars
+ self._input_vars = input_vars
+ self._enqueue_vars = enqueue_vars
+
+ shapes = [var.get_shape().as_list()[1:] for var in enqueue_vars]
+ dtypes = [var.dtype for var in enqueue_vars]
+ self._queue = tf.FIFOQueue(queue_capacity, dtypes, shapes)
+
+ self._num_enqueue_threads = num_enqueue_threads
+ self._enqueue_threads = []
+ self._enqueue_op = self._queue.enqueue_many(self._enqueue_vars)
+ self._stop_op = self._queue.close(cancel_pending_enqueues=True)
+ self._coordinator = None
+
+ self._feed_generator = None
+ self._batch_size = None
+ self._init_fns = []
+
+ def get_input_vars(self, batch_size):
+        """Get the top `batch_size` elements from the queue. The tensors
+        returned by this function should be passed on to the TensorFlow model.
+
+ Parameters
+ ----------
+ batch_size : int
+ The batch size.
+
+ Returns
+ -------
+ List[tf.Tensor]
+ Returns the top `batch_size` elements from the queue. There is a
+ one-to-one correspondence between the `enqueue_vars` passed in to
+ the constructor of this class and the tensors in the list returned
+ by this function.
+
+ """
+ self._batch_size = batch_size
+ return self._queue.dequeue_many(batch_size)
+
+ def run(self, feed_generator, train_op, log_dir="/tmp/slim_trainer/",
+ restore_path=None, variables_to_restore=None, run_id=None,
+ max_checkpoints_to_keep=0, **kwargs):
+ """ Run training.
+
+ Parameters
+ ----------
+ feed_generator : Iterator[ndarray, ...]
+            An iterator or generator that returns batches of training data; the
+            batches must be in one-to-one correspondence with the `enqueue_vars`
+            passed to the constructor of this class.
+ train_op : tf.Tensor
+ The training operation created with `slim.learning.create_train_op`.
+ log_dir : Optional[str]
+ Path to TensorFlow log directory. This value is used in conjunction
+ with `run_id` to generate the checkpoint and summary directory;
+ defaults to '/tmp/slim_trainer'.
+ restore_path : Optional[str]
+ An optional checkpoint path. If not None, resumes training from the
+ given checkpoint.
+ variables_to_restore : Optional[List[str]]
+ An optional list of variable scopes. If not None, only restores
+ variables under the given scope. This value is ignored if
+ `restore_path` is None.
+ run_id : Optional[str]
+ A string that identifies this training run. The checkpoints and
+ TensorFlow summaries are stored in `log_dir/run_id`. If None, a
+ random ID will be generated. Point tensorboard to this directory to
+ monitor training progress.
+ max_checkpoints_to_keep : int
+ Keep only the `max_checkpoints_to_keep` newest checkpoints. If 0,
+ keep all checkpoints.
+ kwargs:
+ Additional named arguments passed on to tf.slim.learning.train,
+ e.g., `number_of_steps=100` to run 100 iterations of training.
+
+ """
+ if restore_path is not None:
+ if variables_to_restore is None:
+ variables_to_restore = slim.get_variables_to_restore()
+ init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
+ restore_path, variables_to_restore)
+ self._init_fns.append(lambda sess: sess.run(
+ init_assign_op, init_feed_dict))
+ self._feed_generator = ThreadSafeIterator(feed_generator)
+ self._coordinator = tf.train.Coordinator()
+
+ if run_id is None:
+ run_id = _generate_run_id(6)
+ log_dir = os.path.join(log_dir, run_id)
+ print("---------------------------------------")
+ print("Run ID: ", run_id)
+ print("Log directory: ", log_dir)
+ print("---------------------------------------")
+
+ saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
+ try:
+ slim.learning.train(
+ train_op, log_dir, self._train_step_fn, session_config=config, saver=saver,
+ **kwargs)
+ except UnboundLocalError:
+ # NOTE(nwojke): Due to a bug in slim, a local variable 'total_loss'
+ # is referenced when an exception is raised during training. We
+ # catch the exception here because it occurs whenever we close the
+ # queue with self._stop_all_threads().
+ pass
+ self._wait_for_threads()
+
+ def evaluate(self, dataset, checkpoint_dir, log_dir, run_id=None,
+ init_op=None, eval_op=None, final_op=None,
+ summary_op=None, variables_to_restore=None,
+ eval_interval_secs=60, max_num_evaluations=None):
+ """Run evaluation. Monitors files in the log directory and computes
+ evaluation metrics. This function must be called concurrently to
+ training (in a separate process).
+
+ WARNING: The dataset is truncated to the batch size. Thus, the computed
+ metrics are only accurate if the dataset length is divisible by the
+ batch size.
+
+ Parameters
+ ----------
+ dataset : List[T]
+ The dataset is a list (or tuple) of data arrays. The length of the
+ list must be the same as the `input_vars` passed to the constructor
+ of this class and there must be a one-to-one correspondence such
+ that `dataset[i]` corresponds to the numeric data of its symbolic
+ equivalent in `input_vars[i]`.
+ checkpoint_dir : str
+ The directory where checkpoints are stored. Should be set to
+ `log_dir` of the training process.
+ log_dir : str
+ Path to TensorFlow log directory where evaluation logs will be
+ stored. This directory should be different from the `log_dir`
+ passed to `run`.
+ run_id : Optional[str]
+            A string that identifies the training run. Should be set to the
+ `run_id` passed to `run`.
+ init_op : Optional[tf.Tensor]
+ Optional operation to execute prior to processing the `dataset`.
+ eval_op : Optional[tf.Tensor]
+ Evaluation operation; will be executed for each batch in the
+ `dataset`.
+ final_op : Optional[tf.Tensor]
+ Optional operation to execute after processing the `dataset`.
+ summary_op : Optional[tf.Tensor]
+ Summary operation; defaults to `tf.summary.merge_all()`.
+ variables_to_restore : Optional[List[tf.Tensor]]
+ List of variables to restore; defaults to
+ `slim.get_variables_to_restore()`.
+ eval_interval_secs : Optional[int]
+ Poll the `checkpoint_dir` every `eval_interval_secs` seconds for
+ new checkpoints.
+ max_num_evaluations : Optional[int]
+ Evaluate at most `max_num_evaluations` checkpoints.
+
+ Returns
+ -------
+ T
+ Returns the value of the last call to `final_op` or None.
+
+ """
+ if run_id is None:
+ print("---------------------------------------")
+ print("Checkpoint directory: ", checkpoint_dir)
+ print("Log directory: ", log_dir)
+ print("---------------------------------------")
+ else:
+ checkpoint_dir = os.path.join(checkpoint_dir, run_id)
+ log_dir = os.path.join(log_dir, run_id)
+ print("---------------------------------------")
+ print("Run ID: ", run_id)
+ print("Checkpoint directory: ", checkpoint_dir)
+ print("Log directory: ", log_dir)
+ print("---------------------------------------")
+
+ if summary_op is None:
+ summary_op = tf.summary.merge_all()
+
+ global_step = tf.train.get_or_create_global_step()
+
+ if variables_to_restore is None:
+ variables_to_restore = slim.get_variables_to_restore()
+ saver = tf.train.Saver(variables_to_restore)
+ summary_writer = tf.summary.FileWriter(log_dir)
+ sv = tf.train.Supervisor(
+ graph=tf.get_default_graph(), logdir=log_dir, summary_op=None,
+ summary_writer=None, global_step=None, saver=saver)
+
+ print("Entering evaluation loop. Waiting for checkpoints.")
+ num_batches, dataset = _truncate_dataset_to_batch_size(
+ self._batch_size, *dataset)
+
+ final_op_value = None
+ num_evaluations = 0
+ for checkpoint_path in slim.evaluation.checkpoints_iterator(
+ checkpoint_dir, eval_interval_secs):
+ with sv.managed_session(start_standard_services=False) as session:
+ sv.saver.restore(session, checkpoint_path)
+ sv.start_queue_runners(session)
+
+ print("Starting evaluation of '%s'" % checkpoint_path)
+ self._feed_generator = iterate_forever(
+ self._batch_size, *dataset)
+ self._coordinator = tf.train.Coordinator()
+ for fn in self._init_fns:
+ fn(session)
+ self._start_enqueue(session, num_threads=1)
+
+ if init_op is not None:
+ session.run(init_op)
+
+ if eval_op is not None:
+ for i in range(num_batches):
+ session.run(eval_op)
+
+ if final_op is not None:
+ final_op_value = session.run(final_op)
+ else:
+ final_op_value = None
+
+ summary_str = session.run(summary_op)
+ global_step_value = session.run(global_step)
+ summary_writer.add_summary(summary_str, global_step_value)
+ summary_writer.flush()
+
+ self._stop_all_threads(session)
+ print("Finished evaluation of '%s'" % checkpoint_path)
+
+ num_evaluations += 1
+ if max_num_evaluations is not None \
+ and num_evaluations >= max_num_evaluations:
+ break
+ return final_op_value
+
+ def _train_step_fn(self, session, train_op, global_step,
+ train_step_kwargs):
+ if len(self._enqueue_threads) == 0:
+ for fn in self._init_fns:
+ fn(session)
+ self._start_enqueue(session)
+ train_start = time.time()
+ total_loss, should_stop = slim.learning.train_step(
+ session, train_op, global_step, train_step_kwargs)
+        print("========each step time: {:.2f} ms".format(
+            (time.time() - train_start) * 1000))
+ if should_stop or self._coordinator.should_stop():
+ self._stop_all_threads(session)
+ print("========loss: ", total_loss)
+ return total_loss, should_stop
+
+ def _stop_all_threads(self, session):
+ self._coordinator.request_stop()
+ session.run(self._stop_op) # Close the queue.
+
+ def _wait_for_threads(self):
+ self._coordinator.join(self._enqueue_threads)
+ self._enqueue_threads = []
+
+ def _start_enqueue(self, session, num_threads=None):
+ if num_threads is None:
+ num_threads = self._num_enqueue_threads
+ for _ in range(num_threads):
+ thread = threading.Thread(
+ target=self._run_enqueue_thread, args=(session, ))
+ thread.start()
+ self._enqueue_threads.append(thread)
+
+ def _run_enqueue_thread(self, session):
+ try:
+ for data in self._feed_generator:
+ if self._coordinator.should_stop():
+ break
+ try:
+ feed_dict = {
+ var: value for var, value in
+ zip(self._input_vars, data)}
+ session.run(self._enqueue_op, feed_dict=feed_dict)
+ except (tf.errors.CancelledError, tf.errors.AbortedError):
+ # We have been requested to stop enqueuing data.
+ break
+ except Exception as e:
+ print("EnqueueError:", e)
+ self._stop_all_threads(session)
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/run_1p.sh b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/run_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..260e3c52580e92b7317a3df3bc3831715bd30bde
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/run_1p.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+### No need to configure the CANN environment on the ModelArts platform; it is already set up.
+### ModelArts platform command for training
+export TF_CPP_MIN_LOG_LEVEL=2 ## Tensorflow api print Log Config
+export ASCEND_SLOG_PRINT_TO_STDOUT=0 ## Print log on terminal on(1), off(0)
+
+code_dir=${1}
+data_dir=${2}
+result_dir=${3}
+obs_url=${4}
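+
+## Example invocation (paths are placeholders):
+##   bash run_1p.sh /path/to/code /path/to/Market-1501-v15.09.15 /path/to/result obs://bucket/output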
+
+current_time=`date "+%Y-%m-%d-%H-%M-%S"`
+
+## Optional flags (uncomment and append to the command below as needed):
+##   --result=${result_dir} --obs_dir=${obs_url} --chip='npu'
+##   --loss_mode=cosine-softmax --platform='modelarts'
+##   --npu_dump_data=False --npu_dump_graph=False
+##   --npu_profiling=False --npu_auto_tune=False
+python3.7 ${code_dir}/train_market1501.py \
+    --dataset_dir=${data_dir} \
+    --run_id=cosine-softmax 2>&1 | tee ${result_dir}/${current_time}_train_npu.log
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/scripts/run_1p.sh b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/scripts/run_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..80fd6a6c6d96415f4789b76a2ad99419df121f04
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/scripts/run_1p.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+### No need to configure the CANN environment on the ModelArts platform; it is already set up.
+### ModelArts platform command for training
+export TF_CPP_MIN_LOG_LEVEL=2 ## Tensorflow api print Log Config
+export ASCEND_SLOG_PRINT_TO_STDOUT=0 ## Print log on terminal on(1), off(0)
+
+code_dir=${1}
+data_dir=${2}
+result_dir=${3}
+obs_url=${4}
+
+current_time=`date "+%Y-%m-%d-%H-%M-%S"`
+
+## Optional flags (uncomment and append to the command below as needed):
+##   --result=${result_dir} --obs_dir=${obs_url} --chip='npu'
+##   --loss_mode=cosine-softmax --run_id=cosine-softmax
+##   --platform='modelarts' --npu_dump_data=False --npu_dump_graph=False
+##   --npu_profiling=False --npu_auto_tune=False
+python3.7 ${code_dir}/train_market1501.py \
+    --dataset_dir=${data_dir} 2>&1 | tee ${result_dir}/${current_time}_train_npu.log
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/scripts/run_apulis.sh b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/scripts/run_apulis.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d3c278f85a26bda7500baf564cb49d001ab9c105
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/scripts/run_apulis.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+### Apulis platform command for training (CANN version 20.2)
+## Set the Ascend log level. To print logs to the terminal, enable 'ASCEND_SLOG_PRINT_TO_STDOUT'.
+## refer to link: https://support.huaweicloud.com/Graphdevg-cann202training1/atlasag_25_0077.html
+export ASCEND_SLOG_PRINT_TO_STDOUT=0 ## Print log on terminal on(1), off(0)
+export ASCEND_GLOBAL_LOG_LEVEL=3 ## Ascend log level. debug(0), info(1), warning(2), error(3)
+export TF_CPP_MIN_LOG_LEVEL=2 ## Tensorflow api print Log Config
+
+## Configure Environment for Auto Tune
+## refer to link:https://support.huaweicloud.com/developmenttg-cann330alphaXtraining/atlasautotune_16_0014.html
+export install_path=/home/HwHiAiUser/Ascend/ascend-toolkit/latest
+export PATH=${install_path}/fwkacllib/bin:$PATH
+export LD_LIBRARY_PATH=${install_path}/fwkacllib/lib64:$LD_LIBRARY_PATH
+export PYTHONPATH=${install_path}/fwkacllib/python/site-packages:$PYTHONPATH
+
+code_dir=$(cd "$(dirname "$0")"; cd ..; pwd)
+echo "===>>>Python boot file dir: ${code_dir}"
+
+current_time=`date "+%Y-%m-%d-%H-%M-%S"`
+
+python3.7 ${code_dir}/train.py \
+ --chip='npu' \
+ --dataset=/data/dataset/storage/flowers/images \
+ --result=./log \
+ --num_classes=5 \
+ --train_step=2 \
+ --npu_dump_data=False \
+ --npu_dump_graph=False \
+ --npu_profiling=False \
+ --npu_auto_tune=False 2>&1 | tee ${code_dir}/${current_time}_train_npu.log
\ No newline at end of file
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_app.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0da1c216eb3fbac37362936d3c85eb130df942d
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_app.py
@@ -0,0 +1,690 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import argparse
+import numpy as np
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+
+from datasets import util
+import queued_trainer
+import metrics
+import losses
+
+
+def create_default_argument_parser(dataset_name):
+ """Create an argument parser with default arguments.
+
+ Parameters
+ ----------
+ dataset_name : str
+ Name of the dataset. This value is used to set default directories.
+
+ Returns
+ -------
+ argparse.ArgumentParser
+ Returns an argument parser with default arguments.
+
+ """
+ parser = argparse.ArgumentParser(
+ description="Metric trainer (%s)" % dataset_name)
+ parser.add_argument(
+ "--batch_size", help="Training batch size", default=128, type=int)
+ parser.add_argument(
+ "--learning_rate", help="Learning rate", default=1e-3, type=float)
+ parser.add_argument(
+ "--eval_log_dir",
+ help="Evaluation log directory (only used in mode 'evaluation').",
+ default="/tmp/%s_evaldir" % dataset_name)
+ parser.add_argument(
+        "--number_of_steps", help="Number of train/eval steps. If None is "
+        "given, runs indefinitely.", default=None, type=int)
+ parser.add_argument(
+ "--log_dir", help="Log and checkpoints directory.",
+ default="/tmp/%s_logdir" % dataset_name)
+ parser.add_argument(
+ "--loss_mode", help="One of 'cosine-softmax', 'magnet', 'triplet'",
+ type=str, default="cosine-softmax")
+ parser.add_argument(
+ "--mode", help="One of 'train', 'eval', 'finalize', 'freeze'.",
+ type=str, default="train")
+ parser.add_argument(
+ "--restore_path", help="If not None, resume training of a given "
+ "checkpoint (mode 'train').", default=None)
+ parser.add_argument(
+        "--run_id", help="An optional run-id. If None is given, a new one "
+        "is created.", type=str, default=None)
+ return parser
+
+
+def to_train_kwargs(args):
+ """Parse command-line training arguments.
+
+ Parameters
+ ----------
+ args : argparse.Namespace
+ Namespace of an argument parser that was created with
+ create_default_argument_parser.
+
+ Returns
+ -------
+ Dict[str, T]
+ Returns a dictionary of named arguments to be passed on to
+ train_loop.
+
+ """
+ kwargs_dict = {
+ "batch_size": args.batch_size,
+ "learning_rate": args.learning_rate,
+ "log_dir": args.log_dir,
+ "loss_mode": args.loss_mode,
+ "number_of_steps": args.number_of_steps,
+ "restore_path": args.restore_path,
+ "run_id": args.run_id,
+ }
+ return kwargs_dict
+
+
+def to_eval_kwargs(args):
+ """Parse command-line evaluation arguments.
+
+ Parameters
+ ----------
+ args : argparse.Namespace
+ Namespace of an argument parser that was created with
+ create_default_argument_parser.
+
+ Returns
+ -------
+ Dict[str, T]
+ Returns a dictionary of named arguments to be passed on to
+ eval_loop.
+
+ """
+ kwargs_dict = {
+ "eval_log_dir": args.eval_log_dir,
+ "log_dir": args.log_dir,
+ "loss_mode": args.loss_mode,
+ "run_id": args.run_id,
+ }
+ return kwargs_dict
+
+
+def train_loop(preprocess_fn, network_factory, train_x, train_y,
+ num_images_per_id, batch_size, log_dir, image_shape=None,
+ restore_path=None, exclude_from_restore=None, run_id=None,
+ number_of_steps=None, loss_mode="cosine-softmax",
+ learning_rate=1e-3, trainable_scopes=None,
+ save_summaries_secs=60, save_interval_secs=300):
+ """Start training.
+
+ Parameters
+ ----------
+ preprocess_fn : Callable[tf.Tensor] -> tf.Tensor
+ A callable that applies preprocessing to a given input image tensor of
+ dtype tf.uint8 and returns a floating point representation (tf.float32).
+ network_factory : Callable[tf.Tensor] -> (tf.Tensor, tf.Tensor)
+ A callable that takes as argument a preprocessed input image of dtype
+ tf.float32 and returns the feature representation as well as a logits
+        tensor. The logits may be set to None if not required by the loss.
+ train_x : List[str] | np.ndarray
+ A list of image filenames or a tensor of images.
+ train_y : List[int] | np.ndarray
+ A list or one-dimensional array of labels for the images in `train_x`.
+ num_images_per_id : int
+ Sample `num_images_per_id` images for each label at each training
+ iteration. The number of identities sampled at each iteration is
+ computed as `batch_size / num_images_per_id`. The `batch_size` must be
+ divisible by this number.
+ batch_size : int
+ The number of images at each training iteration.
+ log_dir : str
+ Used to construct the log and checkpoint directory. They are stored in
+ `log_dir/run_id`.
+ image_shape : Tuple[int, int, int] | NoneType
+ Image shape (height, width, channels) or None. If None, `train_x` must
+        be an array of images such that the shape can be queried from this
+        variable.
+ restore_path : Optional[str]
+ If not None, resumes training from the given checkpoint file.
+ exclude_from_restore : Optional[List[str]]
+ An optional list of variable scopes to be used in conjunction with
+ `restore_path`. If not None, variables in the given scopes are not
+ restored from the checkpoint file.
+ run_id : Optional[str]
+ A string that identifies the training run; used to construct the
+ log and checkpoint directory `log_dir/run_id`. If None, a random
+ string is created.
+ number_of_steps : Optional[int]
+ The total number of training iterations. If None, training runs
+        indefinitely.
+ loss_mode : Optional[str]
+ A string that identifies the loss function used for training; must be
+ one of 'cosine-softmax', 'magnet', 'triplet'. This value defaults to
+ 'cosine-softmax'.
+ learning_rate : Optional[float]
+ Adam learning rate; defaults to 1e-3.
+ trainable_scopes : Optional[List[str]]
+ Optional list of variable scopes. If not None, only variables within the
+ given scopes are trained. Otherwise all variables are trained.
+ save_summaries_secs : Optional[int]
+ Save training summaries every `save_summaries_secs` seconds to the
+ log directory.
+ save_interval_secs : Optional[int]
+ Save checkpoints every `save_interval_secs` seconds to the log
+ directory.
+
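+    Examples
+    --------
+    A minimal sketch; `network_factory`, `train_x` and `train_y` are
+    placeholders for the dataset-specific objects created in, e.g.,
+    `train_market1501.py`, and the image shape is assumed to be
+    (128, 64, 3)::
+
+        import nets.deep_sort.network_definition as net
+
+        train_loop(
+            net.preprocess, network_factory, train_x, train_y,
+            num_images_per_id=4, batch_size=128,
+            log_dir="/tmp/market1501_logdir", image_shape=(128, 64, 3),
+            loss_mode="cosine-softmax", run_id="cosine-softmax")
+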
+ """
+ if image_shape is None:
+ # If image_shape is not set, train_x must be an image array. Here we
+ # query the image shape from the array of images.
+ assert type(train_x) == np.ndarray
+ image_shape = train_x.shape[1:]
+ elif type(train_x) == np.ndarray:
+ assert train_x.shape[1:] == image_shape
+ read_from_file = type(train_x) != np.ndarray
+
+ trainer, train_op = create_trainer(
+ preprocess_fn, network_factory, read_from_file, image_shape, batch_size,
+ loss_mode, learning_rate=learning_rate,
+ trainable_scopes=trainable_scopes)
+
+ feed_generator = queued_trainer.random_sample_identities_forever(
+ batch_size, num_images_per_id, train_x, train_y)
+
+ variables_to_restore = slim.get_variables_to_restore(
+ exclude=exclude_from_restore)
+ trainer.run(
+ feed_generator, train_op, log_dir, restore_path=restore_path,
+ variables_to_restore=variables_to_restore,
+ run_id=run_id, save_summaries_secs=save_summaries_secs,
+ save_interval_secs=save_interval_secs, number_of_steps=number_of_steps)
+
+
+def create_trainer(preprocess_fn, network_factory, read_from_file, image_shape,
+ batch_size, loss_mode, learning_rate=1e-3,
+ trainable_scopes=None):
+ """Create trainer.
+
+ Parameters
+ ----------
+ preprocess_fn : Callable[tf.Tensor] -> tf.Tensor
+ A callable that applies preprocessing to a given input image tensor of
+ dtype tf.uint8 and returns a floating point representation (tf.float32).
+ network_factory : Callable[tf.Tensor] -> (tf.Tensor, tf.Tensor)
+ A callable that takes as argument a preprocessed input image of dtype
+ tf.float32 and returns the feature representation as well as a logits
+        tensor. The logits may be set to None if not required by the loss.
+    read_from_file : bool
+        Set to True if images are read from file. If False, the trainer
+        expects input images as numpy arrays (i.e., data loading must be
+        handled outside of the trainer).
+    image_shape : Tuple[int, int, int]
+        Image shape (height, width, channels).
+    batch_size : int
+        Number of images per batch.
+    loss_mode : str
+        One of 'cosine-softmax', 'magnet', 'triplet'. If 'cosine-softmax', the
+        logits tensor returned by the `network_factory` must not be None.
+    learning_rate : Optional[float]
+        Adam learning rate; defaults to 1e-3.
+    trainable_scopes : Optional[List[str]]
+        Optional list of variable scopes. If not None, only variables within
+        the given scopes are trained. Otherwise all variables are trained.
+
+ Returns
+ -------
+ QueuedTrainer
+ Returns a trainer object to be used for training and evaluating the
+ given TensorFlow model.
+
+ """
+ num_channels = image_shape[-1] if len(image_shape) == 3 else 1
+
+ with tf.device('/cpu:0'):
+ label_var = tf.placeholder(tf.int64, (None,))
+
+ if read_from_file:
+            # NOTE(nwojke): tf.image.decode_jpeg handles various image types.
+ filename_var = tf.placeholder(tf.string, (None, ))
+ image_var = tf.map_fn(
+ lambda x: tf.image.decode_jpeg(
+ tf.read_file(x), channels=num_channels),
+ filename_var, back_prop=False, dtype=tf.uint8)
+ image_var = tf.image.resize_images(image_var, image_shape[:2])
+ input_vars = [filename_var, label_var]
+ else:
+ image_var = tf.placeholder(tf.uint8, (None,) + image_shape)
+ input_vars = [image_var, label_var]
+
+ enqueue_vars = [
+ tf.map_fn(
+ lambda x: preprocess_fn(x, is_training=True),
+ image_var, back_prop=False, dtype=tf.float32),
+ label_var]
+
+ trainer = queued_trainer.QueuedTrainer(enqueue_vars, input_vars)
+ image_var, label_var = trainer.get_input_vars(batch_size)
+ tf.summary.image("images", image_var)
+
+ feature_var, logit_var = network_factory(image_var)
+ _create_loss(feature_var, logit_var, label_var, mode=loss_mode)
+
+ if trainable_scopes is None:
+ variables_to_train = tf.trainable_variables()
+ else:
+ variables_to_train = []
+ for scope in trainable_scopes:
+ variables = tf.get_collection(
+ tf.GraphKeys.TRAINABLE_VARIABLES, scope)
+ variables_to_train.extend(variables)
+
+ global_step = tf.train.get_or_create_global_step()
+
+ loss_var = tf.losses.get_total_loss()
+
+ train_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
+
+ opt_tmp = train_optimizer
+
+    loss_scale_manager = ExponentialUpdateLossScaleManager(
+        init_loss_scale=2**32, incr_every_n_steps=1000,
+        decr_every_n_nan_or_inf=2, decr_ratio=0.5)
+ opt_tmp = NPULossScaleOptimizer(opt_tmp, loss_scale_manager)
+
+ #train_op = slim.learning.create_train_op(
+ # loss_var, tf.train.AdamOptimizer(opt_tmp),
+ # global_step, summarize_gradients=False,
+ # variables_to_train=variables_to_train)
+
+ train_op = slim.learning.create_train_op(
+ loss_var, opt_tmp,
+ global_step, summarize_gradients=False,
+ variables_to_train=variables_to_train)
+
+ #train_op = slim.learning.create_train_op(
+ # loss_var, npu_tf_optimizer(tf.train.AdamOptimizer(learning_rate=learning_rate)),
+ # global_step, summarize_gradients=False,
+ # variables_to_train=variables_to_train)
+ tf.summary.scalar("total_loss", loss_var)
+ tf.summary.scalar("learning_rate", learning_rate)
+
+
+ regularization_var = tf.reduce_sum(tf.losses.get_regularization_loss())
+ tf.summary.scalar("weight_loss", regularization_var)
+ return trainer, train_op
+
+
+def eval_loop(preprocess_fn, network_factory, data_x, data_y, camera_indices,
+ log_dir, eval_log_dir, image_shape=None, run_id=None,
+ loss_mode="cosine-softmax", num_galleries=10, random_seed=4321):
+    """Evaluate a running training session using the CMC metric, averaged
+    over `num_galleries` galleries, where each gallery contains one randomly
+    selected image pair per identity.
+
+    A call to this function blocks indefinitely, monitoring `log_dir/run_id`
+    for new checkpoints. For each checkpoint, it writes summaries to
+    `eval_log_dir/run_id` that can be monitored with TensorBoard.
+
+ Parameters
+ ----------
+ preprocess_fn : Callable[tf.Tensor] -> tf.Tensor
+ A callable that applies preprocessing to a given input image tensor of
+ dtype tf.uint8 and returns a floating point representation (tf.float32).
+ network_factory : Callable[tf.Tensor] -> (tf.Tensor, tf.Tensor)
+ A callable that takes as argument a preprocessed input image of dtype
+ tf.float32 and returns the feature representation as well as a logits
+        tensor. The logits may be set to None if not required by the loss.
+ data_x : List[str] | np.ndarray
+ A list of image filenames or a tensor of images.
+ data_y : List[int] | np.ndarray
+ A list or one-dimensional array of labels for the images in `data_x`.
+    camera_indices : Optional[List[int] | np.ndarray]
+        A list or one-dimensional array of camera indices for the images in
+        `data_x`. If not None, CMC galleries are created such that image pairs
+        are collected from different cameras.
+    log_dir : str
+        Should be equivalent to the `log_dir` passed into `train_loop` of the
+        training run to monitor.
+    eval_log_dir : str
+        Used to construct the TensorBoard log directory where metrics are
+        summarized.
+ image_shape : Tuple[int, int, int] | NoneType
+        Image shape (height, width, channels) or None. If None, `data_x` must
+        be an array of images such that the shape can be queried from this
+        variable.
+ run_id : str
+ A string that identifies the training run; must be set to the same
+ `run_id` passed into `train_loop`.
+ loss_mode : Optional[str]
+ A string that identifies the loss function used for training; must be
+ one of 'cosine-softmax', 'magnet', 'triplet'. This value defaults to
+ 'cosine-softmax'.
+    num_galleries : int
+        The number of galleries to be constructed for evaluation of CMC
+        metrics.
+    random_seed : Optional[int]
+        If not None, the NumPy random seed is fixed to this number; can be
+        used to produce the same galleries over multiple runs.
+
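+    Examples
+    --------
+    A minimal sketch mirroring the call in `train_mars.py`, where `net` is
+    `nets.deep_sort.network_definition` and the remaining names are
+    dataset-specific placeholders::
+
+        eval_loop(
+            net.preprocess, network_factory, valid_x, valid_y,
+            camera_indices, log_dir="/tmp/mars_logdir",
+            eval_log_dir="/tmp/mars_evaldir", run_id="cosine-softmax",
+            image_shape=(128, 64, 3), num_galleries=20)
+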
+ """
+ if image_shape is None:
+ # If image_shape is not set, train_x must be an image array. Here we
+ # query the image shape from the array of images.
+ assert type(data_x) == np.ndarray
+ image_shape = data_x.shape[1:]
+ elif type(data_x) == np.ndarray:
+ assert data_x.shape[1:] == image_shape
+ read_from_file = type(data_x) != np.ndarray
+
+ # Create num_galleries random CMC galleries to average CMC top-k over.
+ probes, galleries = [], []
+ for i in range(num_galleries):
+ probe_indices, gallery_indices = util.create_cmc_probe_and_gallery(
+ data_y, camera_indices, seed=random_seed + i)
+ probes.append(probe_indices)
+ galleries.append(gallery_indices)
+ probes, galleries = np.asarray(probes), np.asarray(galleries)
+
+ # Set up the data feed.
+ with tf.device('/cpu:0'):
+ # Feed probe and gallery indices to the trainer.
+ num_probes, num_gallery_images = probes.shape[1], galleries.shape[1]
+ probe_idx_var = tf.placeholder(tf.int64, (None, num_probes))
+ gallery_idx_var = tf.placeholder(tf.int64, (None, num_gallery_images))
+ trainer = queued_trainer.QueuedTrainer(
+ [probe_idx_var, gallery_idx_var])
+
+ # Retrieve indices from trainer and gather data from constant memory.
+ data_x_var = tf.constant(data_x)
+ data_y_var = tf.constant(data_y)
+
+ probe_idx_var, gallery_idx_var = trainer.get_input_vars(batch_size=1)
+ probe_idx_var = tf.squeeze(probe_idx_var)
+ gallery_idx_var = tf.squeeze(gallery_idx_var)
+
+ # Apply preprocessing.
+ probe_x_var = tf.gather(data_x_var, probe_idx_var)
+ if read_from_file:
+            # NOTE(nwojke): tf.image.decode_jpeg handles various image types.
+ num_channels = image_shape[-1] if len(image_shape) == 3 else 1
+ probe_x_var = tf.map_fn(
+ lambda x: tf.image.decode_jpeg(
+ tf.read_file(x), channels=num_channels),
+ probe_x_var, dtype=tf.uint8)
+ probe_x_var = tf.image.resize_images(probe_x_var, image_shape[:2])
+ probe_x_var = tf.map_fn(
+ lambda x: preprocess_fn(x, is_training=False),
+ probe_x_var, back_prop=False, dtype=tf.float32)
+ probe_y_var = tf.gather(data_y_var, probe_idx_var)
+
+ gallery_x_var = tf.gather(data_x_var, gallery_idx_var)
+ if read_from_file:
+            # NOTE(nwojke): tf.image.decode_jpeg handles various image types.
+ num_channels = image_shape[-1] if len(image_shape) == 3 else 1
+ gallery_x_var = tf.map_fn(
+ lambda x: tf.image.decode_jpeg(
+ tf.read_file(x), channels=num_channels),
+ gallery_x_var, dtype=tf.uint8)
+ gallery_x_var = tf.image.resize_images(
+ gallery_x_var, image_shape[:2])
+ gallery_x_var = tf.map_fn(
+ lambda x: preprocess_fn(x, is_training=False),
+ gallery_x_var, back_prop=False, dtype=tf.float32)
+ gallery_y_var = tf.gather(data_y_var, gallery_idx_var)
+
+ # Construct the network and compute features.
+ probe_and_gallery_x_var = tf.concat(
+ axis=0, values=[probe_x_var, gallery_x_var])
+ probe_and_gallery_x_var, _ = network_factory(probe_and_gallery_x_var)
+
+ num_probe = tf.shape(probe_x_var)[0]
+ probe_x_var = tf.slice(
+ probe_and_gallery_x_var, [0, 0], [num_probe, -1])
+ gallery_x_var = tf.slice(
+ probe_and_gallery_x_var, [num_probe, 0], [-1, -1])
+
+ # Set up the metrics.
+ distance_measure = (
+ metrics.cosine_distance if loss_mode == "cosine-softmax"
+ else metrics.pdist)
+
+ def cmc_metric_at_k(k):
+ return metrics.streaming_mean_cmc_at_k(
+ probe_x_var, probe_y_var, gallery_x_var, gallery_y_var,
+ k=k, measure=distance_measure)
+
+ names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
+ "Precision@%d" % k: cmc_metric_at_k(k) for k in [1, 5, 10, 20]})
+ for metric_name, metric_value in names_to_values.items():
+ tf.summary.scalar(metric_name, metric_value)
+
+ # Start evaluation loop.
+ trainer.evaluate(
+ (probes, galleries), log_dir, eval_log_dir, run_id=run_id,
+ eval_op=list(names_to_updates.values()), eval_interval_secs=60)
+
+
+def finalize(preprocess_fn, network_factory, checkpoint_path, image_shape,
+ output_filename):
+ """Finalize model, i.e., strip off training variables and only save model
+ variables to checkpoint file.
+
+ Parameters
+ ----------
+ preprocess_fn : Callable[tf.Tensor] -> tf.Tensor
+ A callable that applies preprocessing to a given input image tensor of
+ dtype tf.uint8 and returns a floating point representation (tf.float32).
+ network_factory : Callable[tf.Tensor] -> (tf.Tensor, tf.Tensor)
+ A callable that takes as argument a preprocessed input image of dtype
+ tf.float32 and returns the feature representation as well as a logits
+        tensor. The logits may be set to None if not required by the loss.
+ checkpoint_path : str
+ The checkpoint file to load.
+ image_shape : Tuple[int, int, int]
+ Image shape (height, width, channels).
+ output_filename : str
+ The checkpoint file to write.
+
+ """
+ with tf.Session(config=npu_config_proto(), graph=tf.Graph()) as session:
+ input_var = tf.placeholder(tf.uint8, (None, ) + image_shape)
+ image_var = tf.map_fn(
+ lambda x: preprocess_fn(x, is_training=False),
+ input_var, back_prop=False, dtype=tf.float32)
+ network_factory(image_var)
+
+ loader = tf.train.Saver(slim.get_variables_to_restore())
+ loader.restore(session, checkpoint_path)
+
+ saver = tf.train.Saver(slim.get_model_variables())
+ saver.save(session, output_filename, global_step=None)
+
+
+def freeze(preprocess_fn, network_factory, checkpoint_path, image_shape,
+ output_filename, input_name="images", feature_name="features"):
+ """Write frozen inference graph that takes as input a list of images and
+ returns their feature representation.
+
+ Parameters
+ ----------
+ preprocess_fn : Callable[tf.Tensor] -> tf.Tensor
+ A callable that applies preprocessing to a given input image tensor of
+ dtype tf.uint8 and returns a floating point representation (tf.float32).
+ network_factory : Callable[tf.Tensor] -> (tf.Tensor, tf.Tensor)
+ A callable that takes as argument a preprocessed input image of dtype
+ tf.float32 and returns the feature representation as well as a logits
+        tensor. The logits may be set to None if not required by the loss.
+ checkpoint_path : str
+ The checkpoint file to load.
+ image_shape : Tuple[int, int, int]
+ Image shape (height, width, channels).
+ output_filename : str
+ Path to the file to write to.
+ input_name : Optional[str]
+ The input (image) placeholder will be given this name; defaults
+ to `images`.
+ feature_name : Optional[str]
+ The output (feature) tensor will be given this name; defaults to
+ `features`.
+
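+    Examples
+    --------
+    A sketch based on the 'freeze' mode in `train_market1501.py`; `net`,
+    `network_factory`, the checkpoint path and the image shape are
+    placeholders::
+
+        freeze(
+            functools.partial(net.preprocess, input_is_bgr=True),
+            network_factory, "/tmp/market1501_logdir/model.ckpt-100000",
+            image_shape=(128, 64, 3), output_filename="./market1501.pb")
+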
+ """
+ with tf.Session(config=npu_config_proto(), graph=tf.Graph()) as session:
+ input_var = tf.placeholder(
+ tf.uint8, (None, ) + image_shape, name=input_name)
+ image_var = tf.map_fn(
+ lambda x: preprocess_fn(x, is_training=False),
+ input_var, back_prop=False, dtype=tf.float32)
+ features, _ = network_factory(image_var)
+ features = tf.identity(features, name=feature_name)
+
+ saver = tf.train.Saver(slim.get_variables_to_restore())
+ saver.restore(session, checkpoint_path)
+
+ output_graph_def = tf.graph_util.convert_variables_to_constants(
+ session, tf.get_default_graph().as_graph_def(),
+ [features.name.split(":")[0]])
+ with tf.gfile.GFile(output_filename, "wb") as file_handle:
+ file_handle.write(output_graph_def.SerializeToString())
+
+
+def encode(preprocess_fn, network_factory, checkpoint_path, images_or_filenames,
+ batch_size=32, session=None, image_shape=None):
+    """Compute feature vectors for the given images using a trained model.
+
+ Parameters
+ ----------
+ preprocess_fn : Callable[tf.Tensor] -> tf.Tensor
+ A callable that applies preprocessing to a given input image tensor of
+ dtype tf.uint8 and returns a floating point representation (tf.float32).
+ network_factory : Callable[tf.Tensor] -> (tf.Tensor, tf.Tensor)
+ A callable that takes as argument a preprocessed input image of dtype
+ tf.float32 and returns the feature representation as well as a logits
+        tensor. The logits may be set to None if not required by the loss.
+ checkpoint_path : str
+ Checkpoint file to load.
+ images_or_filenames : List[str] | np.ndarray
+ Either a list of filenames or an array of images.
+ batch_size : Optional[int]
+ Optional batch size; defaults to 32.
+ session : Optional[tf.Session]
+ Optional TensorFlow session. If None, a new session is created.
+ image_shape : Tuple[int, int, int] | NoneType
+        Image shape (height, width, channels) or None. If None,
+        `images_or_filenames` must be an array of images such that the shape
+        can be queried from this variable.
+
+ Returns
+ -------
+    np.ndarray
+        An array of shape `(len(images_or_filenames), feature_dim)` holding
+        one feature vector per input image.
+
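+    Examples
+    --------
+    A sketch based on the 'export' mode in `train_market1501.py`; `net`,
+    `network_factory`, the checkpoint path and the filename list are
+    placeholders::
+
+        features = encode(
+            net.preprocess, network_factory,
+            "/tmp/market1501_logdir/model.ckpt-100000",
+            gallery_filenames, image_shape=(128, 64, 3))
+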
+ """
+ if image_shape is None:
+ assert type(images_or_filenames) == np.ndarray
+ image_shape = images_or_filenames.shape[1:]
+ elif type(images_or_filenames) == np.ndarray:
+ assert images_or_filenames.shape[1:] == image_shape
+ read_from_file = type(images_or_filenames) != np.ndarray
+
+ encoder_fn = _create_encoder(
+ preprocess_fn, network_factory, image_shape, batch_size, session,
+ checkpoint_path, read_from_file)
+ features = encoder_fn(images_or_filenames)
+ return features
+
+
+def _create_encoder(preprocess_fn, network_factory, image_shape, batch_size=32,
+ session=None, checkpoint_path=None, read_from_file=False):
+ if read_from_file:
+ num_channels = image_shape[-1] if len(image_shape) == 3 else 1
+ input_var = tf.placeholder(tf.string, (None, ))
+ image_var = tf.map_fn(
+ lambda x: tf.image.decode_jpeg(
+ tf.read_file(x), channels=num_channels),
+ input_var, back_prop=False, dtype=tf.uint8)
+ image_var = tf.image.resize_images(image_var, image_shape[:2])
+ else:
+ input_var = tf.placeholder(tf.uint8, (None, ) + image_shape)
+ image_var = input_var
+
+ preprocessed_image_var = tf.map_fn(
+ lambda x: preprocess_fn(x, is_training=False),
+ image_var, back_prop=False, dtype=tf.float32)
+
+ feature_var, _ = network_factory(preprocessed_image_var)
+ feature_dim = feature_var.get_shape().as_list()[-1]
+
+ if session is None:
+ session = tf.Session(config=npu_config_proto())
+ if checkpoint_path is not None:
+ tf.train.get_or_create_global_step()
+ init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
+ checkpoint_path, slim.get_model_variables())
+ session.run(init_assign_op, feed_dict=init_feed_dict)
+
+ def encoder(data_x):
+ out = np.zeros((len(data_x), feature_dim), np.float32)
+ queued_trainer.run_in_batches(
+ lambda x: session.run(feature_var, feed_dict=x),
+ {input_var: data_x}, out, batch_size)
+ return out
+
+ return encoder
+
+
+def _create_softmax_loss(feature_var, logit_var, label_var):
+ del feature_var # Unused variable
+ cross_entropy_var = slim.losses.sparse_softmax_cross_entropy(
+ logit_var, tf.cast(label_var, tf.int64))
+ tf.summary.scalar("cross_entropy_loss", cross_entropy_var)
+
+ accuracy_var = slim.metrics.accuracy(
+ tf.cast(tf.argmax(logit_var, 1), tf.int64), label_var)
+ tf.summary.scalar("classification accuracy", accuracy_var)
+
+
+def _create_magnet_loss(feature_var, logit_var, label_var, monitor_mode=False):
+    del logit_var  # Unused variable
+ magnet_loss, _, _ = losses.magnet_loss(feature_var, label_var)
+ tf.summary.scalar("magnet_loss", magnet_loss)
+ if not monitor_mode:
+ slim.losses.add_loss(magnet_loss)
+
+
+def _create_triplet_loss(feature_var, logit_var, label_var, monitor_mode=False):
+    del logit_var  # Unused variable
+ triplet_loss = losses.softmargin_triplet_loss(feature_var, label_var)
+ tf.summary.scalar("triplet_loss", triplet_loss)
+ if not monitor_mode:
+ slim.losses.add_loss(triplet_loss)
+
+
+def _create_loss(
+ feature_var, logit_var, label_var, mode, monitor_magnet=True,
+ monitor_triplet=True):
+ if mode == "cosine-softmax":
+ _create_softmax_loss(feature_var, logit_var, label_var)
+ elif mode == "magnet":
+ _create_magnet_loss(feature_var, logit_var, label_var)
+ elif mode == "triplet":
+ _create_triplet_loss(feature_var, logit_var, label_var)
+ else:
+ raise ValueError("Unknown loss mode: '%s'" % mode)
+
+ if monitor_magnet and mode != "magnet":
+ _create_magnet_loss(
+ feature_var, logit_var, label_var, monitor_mode=monitor_magnet)
+ if monitor_triplet and mode != "triplet":
+ _create_triplet_loss(
+ feature_var, logit_var, label_var, monitor_mode=True)
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_market1501.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_market1501.py
new file mode 100644
index 0000000000000000000000000000000000000000..21667df89c7cc684938e3a55fc8fe46ad96d95cf
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_market1501.py
@@ -0,0 +1,139 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import functools
+import os
+import numpy as np
+import scipy.io as sio
+import train_app
+from datasets import market1501
+from datasets import util
+import nets.deep_sort.network_definition as net
+import moxing as mox
+from npu_bridge.estimator import npu_ops
+from tensorflow.core.protobuf import rewriter_config_pb2
+import argparse
+
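+
+# Example invocation (paths and the run id are placeholders; the flags come
+# from train_app.create_default_argument_parser plus those added in main()):
+#
+#   python3.7 train_market1501.py --mode=train \
+#       --dataset_dir=resources/Market-1501-v15.09.15 \
+#       --loss_mode=cosine-softmax --run_id=cosine-softmax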
+
+
+
+class Market1501(object):
+
+ def __init__(self, data_url, num_validation_y=0.1, seed=1234):
+ self._data_url = data_url
+ self._num_validation_y = num_validation_y
+ self._seed = seed
+
+ def read_train(self):
+ filenames, ids, camera_indices = market1501.read_train_split_to_str(
+ self._data_url)
+ train_indices, _ = util.create_validation_split(
+ np.asarray(ids, np.int64), self._num_validation_y, self._seed)
+
+ filenames = [filenames[i] for i in train_indices]
+ ids = [ids[i] for i in train_indices]
+ camera_indices = [camera_indices[i] for i in train_indices]
+ return filenames, ids, camera_indices
+
+ def read_validation(self):
+ filenames, ids, camera_indices = market1501.read_train_split_to_str(
+ self._data_url)
+ _, valid_indices = util.create_validation_split(
+ np.asarray(ids, np.int64), self._num_validation_y, self._seed)
+
+ filenames = [filenames[i] for i in valid_indices]
+ ids = [ids[i] for i in valid_indices]
+ camera_indices = [camera_indices[i] for i in valid_indices]
+ return filenames, ids, camera_indices
+
+ def read_test(self):
+ return market1501.read_test_split_to_str(self._data_url)
+
+
+def main():
+
+
+ arg_parser = train_app.create_default_argument_parser("market1501")
+ arg_parser.add_argument(
+ "--dataset_dir", help="Path to Market1501 dataset directory.",
+ default="resources/Market-1501-v15.09.15")
+ arg_parser.add_argument(
+ "--sdk_dir", help="Path to Market1501 baseline evaluation software.",
+ default="resources/Market-1501-v15.09.15-baseline")
+    arg_parser.add_argument(
+        "--device_id", type=int, default=4, help="Device id, defaults to 4.")
+    arg_parser.add_argument(
+        "--train_url", type=str, default=None,
+        help="OBS path to copy training output to (ModelArts).")
+    args = arg_parser.parse_args()
+ dataset = Market1501(args.dataset_dir, num_validation_y=0.1, seed=1234)
+
+
+
+ if args.mode == "train":
+ train_x, train_y, _ = dataset.read_train()
+ print("Train set size: %d images, %d identities" % (
+ len(train_x), len(np.unique(train_y))))
+
+ network_factory = net.create_network_factory(
+ is_training=True, num_classes=market1501.MAX_LABEL + 1,
+ add_logits=args.loss_mode == "cosine-softmax")
+ train_kwargs = train_app.to_train_kwargs(args)
+ train_app.train_loop(
+ net.preprocess, network_factory, train_x, train_y,
+ num_images_per_id=4, image_shape=market1501.IMAGE_SHAPE,
+ **train_kwargs)
+
+ elif args.mode == "export":
+ # Export one specific model.
+ gallery_filenames, _, query_filenames, _, _ = dataset.read_test()
+
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=market1501.MAX_LABEL + 1,
+ add_logits=False, reuse=None)
+ gallery_features = train_app.encode(
+ net.preprocess, network_factory, args.restore_path,
+ gallery_filenames, image_shape=market1501.IMAGE_SHAPE)
+ sio.savemat(
+ os.path.join(args.sdk_dir, "feat_test.mat"),
+ {"features": gallery_features})
+
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=market1501.MAX_LABEL + 1,
+ add_logits=False, reuse=True)
+ query_features = train_app.encode(
+ net.preprocess, network_factory, args.restore_path,
+ query_filenames, image_shape=market1501.IMAGE_SHAPE)
+ sio.savemat(
+ os.path.join(args.sdk_dir, "feat_query.mat"),
+ {"features": query_features})
+ elif args.mode == "finalize":
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=market1501.MAX_LABEL + 1,
+ add_logits=False, reuse=None)
+ train_app.finalize(
+ functools.partial(net.preprocess, input_is_bgr=True),
+ network_factory, args.restore_path,
+ image_shape=market1501.IMAGE_SHAPE,
+ output_filename="./market1501.ckpt")
+ elif args.mode == "freeze":
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=market1501.MAX_LABEL + 1,
+ add_logits=False, reuse=None)
+ train_app.freeze(
+ functools.partial(net.preprocess, input_is_bgr=True),
+ network_factory, args.restore_path,
+ image_shape=market1501.IMAGE_SHAPE,
+ output_filename="./market1501.pb")
+ else:
+ raise ValueError("Invalid mode argument.")
+    # Create the training output directory inside the ModelArts container.
+    model_dir = "/cache/result"
+    os.makedirs(model_dir, exist_ok=True)
+    # After training, copy the training output from the container to OBS.
+    if args.train_url is not None:
+        mox.file.copy_parallel(model_dir, args.train_url)
+
+
+if __name__ == "__main__":
+ #(npu_sess, npu_shutdown) = init_resource()
+ main()
+
+ #shutdown_resource(npu_sess, npu_shutdown)
+ #close_session(npu_sess)
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_mars.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_mars.py
new file mode 100644
index 0000000000000000000000000000000000000000..71d8356fbc8e56c34c163d11c9c52b959d0211e2
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/train_mars.py
@@ -0,0 +1,124 @@
+# vim: expandtab:ts=4:sw=4
+from npu_bridge.npu_init import *
+import functools
+import os
+import numpy as np
+import scipy.io as sio
+import train_app
+from datasets import mars
+from datasets import util
+import nets.deep_sort.network_definition as net
+
+
+IMAGE_SHAPE = 128, 64, 3
+
+
+class Mars(object):
+
+ def __init__(self, dataset_dir, num_validation_y=0.1, seed=1234):
+ self._dataset_dir = dataset_dir
+ self._num_validation_y = num_validation_y
+ self._seed = seed
+
+ def read_train(self):
+ filenames, ids, camera_indices, _ = mars.read_train_split_to_str(
+ self._dataset_dir)
+ train_indices, _ = util.create_validation_split(
+ np.asarray(ids, np.int64), self._num_validation_y, self._seed)
+
+ filenames = [filenames[i] for i in train_indices]
+ ids = [ids[i] for i in train_indices]
+ camera_indices = [camera_indices[i] for i in train_indices]
+ return filenames, ids, camera_indices
+
+ def read_validation(self):
+ filenames, ids, camera_indices, _ = mars.read_train_split_to_str(
+ self._dataset_dir)
+ _, valid_indices = util.create_validation_split(
+ np.asarray(ids, np.int64), self._num_validation_y, self._seed)
+
+ filenames = [filenames[i] for i in valid_indices]
+ ids = [ids[i] for i in valid_indices]
+ camera_indices = [camera_indices[i] for i in valid_indices]
+ return filenames, ids, camera_indices
+
+ def read_test_filenames(self):
+ filename = os.path.join(self._dataset_dir, "info", "test_name.txt")
+ with open(filename, "r") as file_handle:
+ content = file_handle.read()
+ lines = content.splitlines()
+
+ image_dir = os.path.join(self._dataset_dir, "bbox_test")
+ return [os.path.join(image_dir, f[:4], f) for f in lines]
+
+
+def main():
+ arg_parser = train_app.create_default_argument_parser("mars")
+ arg_parser.add_argument(
+ "--dataset_dir", help="Path to MARS dataset directory.",
+ default="resources/MARS-evaluation-master")
+ args = arg_parser.parse_args()
+ dataset = Mars(args.dataset_dir, num_validation_y=0.1, seed=1234)
+
+ if args.mode == "train":
+ train_x, train_y, _ = dataset.read_train()
+ print("Train set size: %d images, %d identities" % (
+ len(train_x), len(np.unique(train_y))))
+
+ network_factory = net.create_network_factory(
+ is_training=True, num_classes=mars.MAX_LABEL + 1,
+ add_logits=args.loss_mode == "cosine-softmax")
+ train_kwargs = train_app.to_train_kwargs(args)
+ train_app.train_loop(
+ net.preprocess, network_factory, train_x, train_y,
+ num_images_per_id=4, image_shape=IMAGE_SHAPE, **train_kwargs)
+ elif args.mode == "eval":
+ valid_x, valid_y, camera_indices = dataset.read_validation()
+ print("Validation set size: %d images, %d identities" % (
+ len(valid_x), len(np.unique(valid_y))))
+
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=mars.MAX_LABEL + 1,
+ add_logits=args.loss_mode == "cosine-softmax")
+ eval_kwargs = train_app.to_eval_kwargs(args)
+ train_app.eval_loop(
+ net.preprocess, network_factory, valid_x, valid_y, camera_indices,
+ image_shape=IMAGE_SHAPE, num_galleries=20, **eval_kwargs)
+ elif args.mode == "export":
+ filenames = dataset.read_test_filenames()
+
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=mars.MAX_LABEL + 1,
+ add_logits=False, reuse=None)
+ features = train_app.encode(
+ net.preprocess, network_factory, args.restore_path,
+ filenames, image_shape=IMAGE_SHAPE)
+ sio.savemat(
+ os.path.join(args.dataset_dir, "feat_test.mat"),
+ {"features": features})
+ elif args.mode == "finalize":
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=mars.MAX_LABEL + 1,
+ add_logits=False, reuse=None)
+ train_app.finalize(
+ functools.partial(net.preprocess, input_is_bgr=True),
+ network_factory, args.restore_path, image_shape=IMAGE_SHAPE,
+ output_filename="./mars.ckpt")
+ elif args.mode == "freeze":
+ network_factory = net.create_network_factory(
+ is_training=False, num_classes=mars.MAX_LABEL + 1,
+ add_logits=False, reuse=None)
+ train_app.freeze(
+ functools.partial(net.preprocess, input_is_bgr=True),
+ network_factory, args.restore_path, image_shape=IMAGE_SHAPE,
+ output_filename="./mars.pb")
+ else:
+ raise ValueError("Invalid mode argument.")
+
+
+if __name__ == "__main__":
+ (npu_sess, npu_shutdown) = init_resource()
+ main()
+ shutdown_resource(npu_sess, npu_shutdown)
+ close_session(npu_sess)
+
diff --git a/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/vis_tools.py b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/vis_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..daae7b50d1dbc6e8a8385968e3a92e1b0fe76d33
--- /dev/null
+++ b/TensorFlow/contrib/DeepSort_ID0505_for_TensorFlow/DeepSort_ID0505_for_TensorFlow/vis_tools.py
@@ -0,0 +1,82 @@
+# vim: expandtab:ts=4:sw=4
+
+from npu_bridge.npu_init import *
+from sklearn.manifold import TSNE
+import numpy as np
+import cv2
+
+
+def gray_to_color(img):
+ if len(img.shape) == 2:
+ img = np.dstack((img, img, img))
+ return img
+
+
+def min_resize(img, size):
+ """
+    Resize an image so that its smaller spatial dimension equals `size`.
+ """
+ w, h = map(float, img.shape[:2])
+ if min([w, h]) != size:
+ if w <= h:
+ img = cv2.resize(img, (int(round((h/w)*size)), int(size)))
+ else:
+ img = cv2.resize(img, (int(size), int(round((w/h)*size))))
+ return img
+
+
+def image_scatter(features, images, img_res, res=2000, cval=255):
+ """
+ Embeds images via tsne into a scatter plot.
+
+    Parameters
+    ----------
+    features : numpy array
+        Features to visualize.
+    images : list or numpy array
+        Images corresponding to the features. Expects float images in [0, 1].
+    img_res : float or int
+        Resolution to embed images at.
+    res : float or int
+        Size of the embedding image in pixels.
+    cval : float or numpy array
+        Background color value.
+
+    Returns
+    -------
+    canvas : numpy array
+ Image of visualization
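+
+    Example
+    -------
+    A purely illustrative call with random data (shapes are arbitrary)::
+
+        feats = np.random.rand(100, 128)
+        imgs = [np.random.rand(64, 32, 3) for _ in range(100)]
+        canvas = image_scatter(feats, imgs, img_res=32, res=1000)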
+ """
+ features = np.copy(features).astype('float64')
+ images = [gray_to_color(image) for image in images]
+ images = [min_resize(image, img_res) for image in images]
+ max_width = max([image.shape[0] for image in images])
+ max_height = max([image.shape[1] for image in images])
+
+ model = TSNE(n_components=2, random_state=0)
+ f2d = model.fit_transform(features)
+
+ xx = f2d[:, 0]
+ yy = f2d[:, 1]
+ x_min, x_max = xx.min(), xx.max()
+ y_min, y_max = yy.min(), yy.max()
+ # Fix the ratios
+ sx = (x_max-x_min)
+ sy = (y_max-y_min)
+    if sx > sy:
+        res_x = int(sx / float(sy) * res)
+        res_y = int(res)
+    else:
+        res_x = int(res)
+        res_y = int(sy / float(sx) * res)
+
+    canvas = np.ones((res_x + max_width, res_y + max_height, 3)) * cval
+ x_coords = np.linspace(x_min, x_max, res_x)
+ y_coords = np.linspace(y_min, y_max, res_y)
+ for x, y, image in zip(xx, yy, images):
+ w, h = image.shape[:2]
+ x_idx = np.argmin((x - x_coords)**2)
+ y_idx = np.argmin((y - y_coords)**2)
+ canvas[x_idx:x_idx+w, y_idx:y_idx+h] = image
+ return canvas
+